From 408dcd401bc09371b47b6adaa0ff0251d89e66d1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 16 Jul 2025 13:49:55 +0000
Subject: [PATCH] build(deps): bump github.com/containers/image/v5 from 5.33.1
 to 5.36.0

Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.33.1 to 5.36.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.33.1...v5.36.0)

---
updated-dependencies:
- dependency-name: github.com/containers/image/v5
  dependency-version: 5.36.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 117 +-
 go.sum | 341 +-
 vendor/dario.cat/mergo/FUNDING.json | 7 +
 vendor/dario.cat/mergo/README.md | 5 -
 vendor/dario.cat/mergo/SECURITY.md | 4 +-
 vendor/github.com/BurntSushi/toml/README.md | 2 +-
 vendor/github.com/BurntSushi/toml/decode.go | 31 +-
 vendor/github.com/BurntSushi/toml/encode.go | 46 +-
 vendor/github.com/BurntSushi/toml/error.go | 65 +-
 vendor/github.com/BurntSushi/toml/lex.go | 33 +-
 vendor/github.com/BurntSushi/toml/meta.go | 3 -
 vendor/github.com/BurntSushi/toml/parse.go | 17 +-
 .../Microsoft/hcsshim/.clang-format | 12 +
 .../Microsoft/hcsshim/.golangci.yml | 43 +-
 vendor/github.com/Microsoft/hcsshim/Makefile | 116 +-
 .../Microsoft/hcsshim/Makefile.bootfiles | 197 +
 vendor/github.com/Microsoft/hcsshim/README.md | 2 +-
 .../Microsoft/hcsshim/internal/hcs/process.go | 9 +-
 .../hcsshim/internal/hcs/schema2/chipset.go | 2 +
 .../hcs/schema2/{cim_mount.go => cimfs.go} | 8 -
 .../hcsshim/internal/hcs/schema2/firmware.go | 8 +
 .../hcsshim/internal/hcs/schema2/memory_2.go | 49 -
 .../hcs/schema2/memory_backing_type.go | 21 +
 .../hcsshim/internal/hcs/schema2/numa.go | 19 +
 .../hcsshim/internal/hcs/schema2/numa_node.go | 17 +
 .../internal/hcs/schema2/numa_node_memory.go | 19 +
 .../hcs/schema2/numa_node_processor.go | 17 +
 .../internal/hcs/schema2/numa_processors.go | 21 +
 .../internal/hcs/schema2/numa_setting.go | 21 +
 .../internal/hcs/schema2/processor_2.go | 23 -
 .../internal/hcs/schema2/properties.go | 2 +
 .../internal/hcs/schema2/property_type.go | 1 +
 .../hcsshim/internal/hcs/schema2/topology.go | 12 +-
 .../internal/hcs/schema2/virtual_machine.go | 39 +-
 .../hcs/schema2/virtual_machine_memory.go | 33 +
 .../hcs/schema2/virtual_machine_processor.go | 21 +
 .../hcs/schema2/virtual_pci_device.go | 3 +-
 .../internal/hcs/schema2/virtual_slit_type.go | 23 +
 .../hcs/schema2/windows_crash_reporting.go | 2 +
 .../Microsoft/hcsshim/internal/hcs/system.go | 21 +-
 .../hcsshim/internal/hns/hnsaccelnet.go | 4 +-
 .../hcsshim/internal/jobobject/jobobject.go | 20 +-
 .../hcsshim/internal/jobobject/limits.go | 1 +
 .../Microsoft/hcsshim/internal/log/context.go | 28 +-
 .../Microsoft/hcsshim/internal/log/format.go | 4 +-
 .../Microsoft/hcsshim/internal/log/scrub.go | 15 +-
 .../hcsshim/internal/vmcompute/vmcompute.go | 2 +-
 .../hcsshim/internal/wclayer/legacy.go | 1 +
 .../hcsshim/internal/winapi/cimfs.go | 11 +
 .../internal/winapi/zsyscall_windows.go | 146 +
 .../osversion/platform_compat_windows.go | 22 +-
 .../hcsshim/osversion/windowsbuilds.go | 7 +
 .../asaskevich/govalidator/.gitignore | 15 -
 .../asaskevich/govalidator/.travis.yml | 12 -
 .../asaskevich/govalidator/CODE_OF_CONDUCT.md | 43 -
 .../asaskevich/govalidator/CONTRIBUTING.md | 63 -
 .../github.com/asaskevich/govalidator/LICENSE | 21 -
 .../asaskevich/govalidator/README.md | 622 -
 .../asaskevich/govalidator/arrays.go | 87 -
 .../asaskevich/govalidator/converter.go | 81 -
 .../github.com/asaskevich/govalidator/doc.go | 3 -
 .../asaskevich/govalidator/error.go | 47 -
 .../asaskevich/govalidator/numerics.go | 100 -
 .../asaskevich/govalidator/patterns.go | 113 -
 .../asaskevich/govalidator/types.go | 656 -
 .../asaskevich/govalidator/utils.go | 270 -
 .../asaskevich/govalidator/validator.go | 1768 --
 .../asaskevich/govalidator/wercker.yml | 15 -
 .../containerd/errdefs/pkg/errhttp/http.go | 96 +
 .../stargz-snapshotter/estargz/testutil.go | 15 +-
 .../containerd/typeurl/v2/README.md | 6 +
 .../github.com/containerd/typeurl/v2/types.go | 89 +-
 .../containerd/typeurl/v2/types_gogo.go | 68 +
 .../containers/image/v5/copy/copy.go | 8 +
 .../containers/image/v5/copy/multiple.go | 2 +-
 .../containers/image/v5/copy/single.go | 49 +-
 .../image/v5/directory/directory_dest.go | 1 +
 .../image/v5/docker/archive/transport.go | 3 +
 .../containers/image/v5/docker/body_reader.go | 10 +-
 .../image/v5/docker/daemon/client.go | 12 +
 .../image/v5/docker/daemon/daemon_dest.go | 2 +-
 .../v5/docker/daemon/daemon_transport.go | 9 +-
 .../image/v5/docker/distribution_error.go | 31 +-
 .../image/v5/docker/docker_client.go | 57 +-
 .../image/v5/docker/docker_image_dest.go | 11 +-
 .../image/v5/docker/docker_image_src.go | 6 +-
 .../containers/image/v5/docker/errors.go | 6 +-
 .../image/v5/docker/internal/tarfile/dest.go | 1 +
 .../v5/docker/internal/tarfile/writer.go | 4 +-
 .../image/v5/docker/paths_common.go | 1 -
 .../image/v5/docker/paths_freebsd.go | 1 -
 .../image/v5/docker/registries_d.go | 6 +
 .../image/v5/docker/wwwauthenticate.go | 17 +-
 .../image/v5/internal/image/unparsed.go | 6 +
 .../stubs/original_oci_config.go | 16 +
 .../v5/internal/imagedestination/wrapper.go | 1 +
 .../internal/manifest/docker_schema2_list.go | 32 +-
 .../image/v5/internal/manifest/oci_index.go | 10 +-
 .../image/v5/internal/private/private.go | 15 +
 .../containers/image/v5/internal/set/set.go | 11 +-
 .../image/v5/manifest/docker_schema1.go | 12 +-
 .../containers/image/v5/manifest/oci.go | 5 +-
 .../image/v5/oci/archive/oci_dest.go | 26 +-
 .../image/v5/oci/archive/oci_transport.go | 4 +-
 .../image/v5/oci/internal/oci_util.go | 31 +-
 .../image/v5/oci/layout/oci_delete.go | 12 +-
 .../image/v5/oci/layout/oci_dest.go | 111 +-
 .../containers/image/v5/oci/layout/oci_src.go | 24 +
 .../image/v5/oci/layout/oci_transport.go | 78 +-
 .../containers/image/v5/oci/layout/reader.go | 52 +
 .../image/v5/openshift/openshift-copies.go | 5 +-
 .../image/v5/openshift/openshift_dest.go | 9 +
 .../containers/image/v5/ostree/ostree_dest.go | 533 -
 .../containers/image/v5/ostree/ostree_src.go | 455 -
 .../image/v5/ostree/ostree_transport.go | 242 -
 .../v5/pkg/blobinfocache/memory/memory.go | 2 +-
 .../v5/pkg/blobinfocache/sqlite/sqlite.go | 33 +-
 .../image/v5/pkg/docker/config/config.go | 52 +-
 .../v5/pkg/sysregistriesv2/paths_common.go | 1 -
 .../v5/pkg/sysregistriesv2/paths_freebsd.go | 1 -
 .../v5/pkg/sysregistriesv2/shortnames.go | 12 +-
 .../sysregistriesv2/system_registries_v2.go | 22 +-
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 10 +-
 .../containers/image/v5/sif/load.go | 10 +-
 .../image/v5/signature/fulcio_cert.go | 31 +-
 .../image/v5/signature/fulcio_cert_stub.go | 28 -
 .../image/v5/signature/internal/errors.go | 9 +
 .../v5/signature/internal/rekor_api_types.go | 95 +
 .../image/v5/signature/internal/rekor_set.go | 40 +-
 .../v5/signature/internal/rekor_set_stub.go | 15 -
 .../image/v5/signature/mechanism_gpgme.go | 1 -
 .../image/v5/signature/mechanism_openpgp.go | 1 -
 .../containers/image/v5/signature/pki_cert.go | 74 +
 .../v5/signature/policy_config_sigstore.go | 192 +-
 .../v5/signature/policy_eval_sigstore.go | 99 +-
 .../image/v5/signature/policy_paths_common.go | 1 -
 .../v5/signature/policy_paths_freebsd.go | 1 -
 .../image/v5/signature/policy_types.go | 37 +-
 .../image/v5/storage/storage_dest.go | 642 +-
 .../image/v5/storage/storage_image.go | 1 -
 .../image/v5/storage/storage_reference.go | 5 +-
 .../image/v5/storage/storage_src.go | 23 +-
 .../image/v5/storage/storage_transport.go | 14 +-
 .../image/v5/tarball/tarball_src.go | 61 +-
 .../transports/alltransports/alltransports.go | 5 +-
 .../transports/alltransports/docker_daemon.go | 1 -
 .../alltransports/docker_daemon_stub.go | 1 -
 .../v5/transports/alltransports/ostree.go | 9 -
 .../transports/alltransports/ostree_stub.go | 10 -
 .../v5/transports/alltransports/storage.go | 1 -
 .../transports/alltransports/storage_stub.go | 1 -
 .../image/v5/transports/transports.go | 2 +-
 .../containers/image/v5/types/types.go | 13 +
 .../containers/image/v5/version/version.go | 4 +-
 .../keywrap/pkcs7/keywrapper_pkcs7.go | 2 +-
 .../github.com/containers/storage/.cirrus.yml | 11 +-
 .../containers/storage/.golangci.yml | 23 +-
 .../containers/storage/CONTRIBUTING.md | 121 +-
 vendor/github.com/containers/storage/Makefile | 2 +-
 vendor/github.com/containers/storage/VERSION | 2 +-
 vendor/github.com/containers/storage/check.go | 8 +-
 .../containers/storage/containers.go | 12 +-
 .../containers/storage/deprecated.go | 7 +-
 .../containers/storage/drivers/aufs/aufs.go | 23 +-
 .../containers/storage/drivers/btrfs/btrfs.go | 17 +
 .../storage/drivers/btrfs/version.go | 2 +-
 .../storage/drivers/btrfs/version_none.go | 14 -
 .../containers/storage/drivers/chown.go | 4 +-
 .../storage/drivers/chown_darwin.go | 2 +-
 .../containers/storage/drivers/chown_unix.go | 6 +-
 .../storage/drivers/copy/copy_linux.go | 18 +-
 .../storage/drivers/copy/copy_unsupported.go | 4 +-
 .../containers/storage/drivers/driver.go | 55 +-
 .../storage/drivers/overlay/check.go | 2 +-
 .../storage/drivers/overlay/check_116.go | 3 +-
 .../storage/drivers/overlay/composefs.go | 10 +-
 .../storage/drivers/overlay/overlay.go | 240 +-
 .../storage/drivers/overlay/overlay_nocgo.go | 23 -
 .../drivers/quota/projectquota_supported.go | 15 +-
 .../drivers/register/register_overlay.go | 2 +-
 .../containers/storage/drivers/vfs/driver.go | 65 +-
 .../storage/drivers/windows/windows.go | 21 +-
 .../containers/storage/drivers/zfs/zfs.go | 17 +
 .../github.com/containers/storage/images.go | 6 +-
 .../storage/internal/dedup/dedup.go | 163 +
 .../storage/internal/dedup/dedup_linux.go | 139 +
 .../internal/dedup/dedup_unsupported.go | 27 +
 .../internal/rawfilelock/rawfilelock.go | 64 +
 .../internal/rawfilelock/rawfilelock_unix.go | 49 +
 .../rawfilelock/rawfilelock_windows.go | 48 +
 .../staging_lockfile/staging_lockfile.go | 147 +
 .../storage/internal/tempdir/tempdir.go | 243 +
 .../github.com/containers/storage/layers.go | 231 +-
 .../containers/storage/pkg/archive/archive.go | 486 +-
 .../storage/pkg/archive/archive_linux.go | 4 +-
 .../storage/pkg/archive/archive_unix.go | 6 +-
 .../storage/pkg/archive/archive_windows.go | 2 +-
 .../containers/storage/pkg/archive/changes.go | 15 +-
 .../storage/pkg/archive/changes_linux.go | 22 +-
 .../containers/storage/pkg/archive/diff.go | 11 +-
 .../storage/pkg/chrootarchive/archive.go | 5 +-
 .../storage/pkg/chrootarchive/diff_unix.go | 3 -
 .../storage/pkg/chrootarchive/init_darwin.go | 4 -
 .../storage/pkg/chrootarchive/init_windows.go | 4 -
 .../storage/pkg/chunked/bloom_filter_linux.go | 2 +-
 .../storage/pkg/chunked/cache_linux.go | 12 +-
 .../storage/pkg/chunked/compression.go | 18 +-
 .../storage/pkg/chunked/compression_linux.go | 294 +-
 .../pkg/chunked/compressor/compressor.go | 90 +-
 .../storage/pkg/chunked/dump/dump.go | 70 +-
 .../storage/pkg/chunked/filesystem_linux.go | 73 +-
 .../internal/{ => minimal}/compression.go | 19 +-
 .../storage/pkg/chunked/internal/path/path.go | 27 +
 .../storage/pkg/chunked/storage_linux.go | 715 +-
 .../pkg/chunked/storage_unsupported.go | 7 +-
 .../containers/storage/pkg/chunked/toc/toc.go | 4 +-
 .../storage/pkg/directory/directory_unix.go | 6 +-
 .../storage/pkg/fileutils/exists_unix.go | 4 +-
 .../storage/pkg/fileutils/reflink_linux.go | 20 +
 .../pkg/fileutils/reflink_unsupported.go | 15 +
 .../storage/pkg/idmap/idmapped_utils.go | 7 +-
 .../containers/storage/pkg/idtools/idtools.go | 175 +-
 .../storage/pkg/idtools/idtools_supported.go | 13 +-
 .../storage/pkg/ioutils/bytespipe.go | 7 +-
 .../containers/storage/pkg/ioutils/writers.go | 4 +-
 .../storage/pkg/lockfile/lockfile.go | 97 +-
 .../storage/pkg/lockfile/lockfile_unix.go | 40 -
 .../storage/pkg/lockfile/lockfile_windows.go | 36 -
 .../storage/pkg/loopback/attach_loopback.go | 8 +-
 .../containers/storage/pkg/loopback/ioctl.go | 2 +-
 .../storage/pkg/loopback/loop_wrapper.go | 40 +-
 .../storage/pkg/loopback/loopback.go | 6 +-
 .../containers/storage/pkg/pools/pools.go | 4 +-
 .../containers/storage/pkg/reexec/reexec.go | 2 +-
 .../storage/pkg/stringutils/stringutils.go | 4 +-
 .../storage/pkg/system/extattr_freebsd.go | 93 +
 .../storage/pkg/system/extattr_unsupported.go | 24 +
 .../storage/pkg/system/stat_linux.go | 4 +-
 .../storage/pkg/system/stat_netbsd.go | 13 +
 .../storage/pkg/system/xattrs_darwin.go | 2 +-
 .../storage/pkg/system/xattrs_freebsd.go | 85 +
 .../storage/pkg/system/xattrs_linux.go | 2 +-
 .../storage/pkg/system/xattrs_unsupported.go | 4 +-
 .../storage/pkg/unshare/unshare_linux.go | 25 +-
 .../containers/storage/storage.conf | 19 +
 vendor/github.com/containers/storage/store.go | 316 +-
 .../containers/storage/types/options.go | 26 +-
 .../containers/storage/types/utils.go | 4 +-
 .../github.com/containers/storage/userns.go | 7 +-
 .../cyphar/filepath-securejoin/CHANGELOG.md | 82 +-
 .../cyphar/filepath-securejoin/VERSION | 2 +-
 .../gocompat_errors_go120.go | 18 +
 .../gocompat_errors_unsupported.go | 38 +
 .../gocompat_generics_go121.go | 32 +
 .../gocompat_generics_unsupported.go | 124 +
 .../cyphar/filepath-securejoin/join.go | 49 +-
 .../filepath-securejoin/lookup_linux.go | 3 +-
 .../cyphar/filepath-securejoin/mkdir_linux.go | 65 +-
 .../filepath-securejoin/openat2_linux.go | 3 +-
 .../filepath-securejoin/procfs_linux.go | 30 +-
 .../registry/client/auth/challenge/addr.go | 27 -
 .../client/auth/challenge/authchallenge.go | 237 -
 .../client/command.go | 25 +-
 vendor/github.com/docker/docker/AUTHORS | 44 +-
 vendor/github.com/docker/docker/api/common.go | 4 +-
 .../github.com/docker/docker/api/swagger.yaml | 2138 ++-
 .../docker/docker/api/types/blkiodev/blkio.go | 2 +-
 .../docker/docker/api/types/build/build.go | 91 +
 .../docker/docker/api/types/build/cache.go | 52 +
 .../docker/api/types/build/disk_usage.go | 8 +
 .../docker/docker/api/types/client.go | 200 +-
 .../docker/api/types/{ => common}/id_response.go | 4 +-
 .../docker/api/types/container/commit.go | 7 +
 .../docker/api/types/container/config.go | 2 +-
 .../docker/api/types/container/container.go | 144 +
 .../api/types/container/container_top.go | 22 -
 .../api/types/container/container_update.go | 16 -
 .../docker/api/types/container/disk_usage.go | 8 +
 .../docker/docker/api/types/container/exec.go | 12 +-
 .../docker/api/types/container/health.go | 50 +
 .../docker/api/types/container/hostconfig.go | 8 +-
 .../api/types/container/hostconfig_unix.go | 2 +-
 .../api/types/container/hostconfig_windows.go | 2 +-
 .../api/types/container/network_settings.go | 56 +
 .../docker/api/types/{ => container}/port.go | 2 +-
 .../docker/api/types/container/state.go | 64 +
 .../docker/api/types/container/stats.go | 30 +-
 .../api/types/container/top_response.go | 18 +
 .../api/types/container/update_response.go | 14 +
 .../api/types/container/waitcondition.go | 2 +-
 .../docker/docker/api/types/events/events.go | 14 +-
 .../docker/docker/api/types/filters/errors.go | 13 -
 .../docker/docker/api/types/filters/parse.go | 18 +-
 .../docker/api/types/image/disk_usage.go | 8 +
 .../docker/api/types/image/image_history.go | 2 +-
 .../docker/api/types/image/image_inspect.go | 142 +
 .../docker/docker/api/types/image/opts.go | 40 +-
 .../docker/docker/api/types/image/summary.go | 9 +
 .../docker/docker/api/types/mount/mount.go | 9 +-
 .../docker/api/types/network/endpoint.go | 6 +
 .../docker/api/types/network/network.go | 6 +-
 .../docker/api/types/plugin_responses.go | 2 +-
 .../docker/api/types/registry/authconfig.go | 21 +-
 .../docker/api/types/registry/authenticate.go | 2 +-
 .../docker/api/types/registry/registry.go | 52 +-
 .../docker/api/types/registry/search.go | 9 +-
 .../driver_data.go} | 8 +-
 .../docker/api/types/strslice/strslice.go | 2 +-
 .../docker/docker/api/types/swarm/common.go | 2 +-
 .../docker/docker/api/types/swarm/config.go | 26 +-
 .../docker/api/types/swarm/container.go | 2 +-
 .../docker/docker/api/types/swarm/network.go | 2 +-
 .../docker/docker/api/types/swarm/node.go | 14 +-
 .../docker/docker/api/types/swarm/runtime.go | 2 +-
 .../docker/api/types/swarm/runtime/gen.go | 2 +-
 .../docker/docker/api/types/swarm/secret.go | 38 +-
 .../docker/docker/api/types/swarm/service.go | 74 +-
 .../docker/docker/api/types/swarm/swarm.go | 9 +-
 .../docker/docker/api/types/swarm/task.go | 8 +-
 .../docker/api/types/system/disk_usage.go | 17 +
 .../docker/docker/api/types/system/info.go | 30 +-
 .../docker/docker/api/types/time/timestamp.go | 12 +-
 .../docker/docker/api/types/types.go | 394 +-
 .../docker/api/types/types_deprecated.go | 299 +-
 .../docker/api/types/versions/compare.go | 2 +-
 .../docker/api/types/volume/disk_usage.go | 8 +
 .../docker/docker/api/types/volume/options.go | 2 +-
 .../docker/api/types/volume/volume_update.go | 2 +-
 .../docker/docker/client/build_cancel.go | 6 +-
 .../docker/docker/client/build_prune.go | 29 +-
 ...nterface_experimental.go => checkpoint.go} | 12 +-
 .../docker/docker/client/checkpoint_create.go | 11 +-
 .../docker/docker/client/checkpoint_delete.go | 7 +-
 .../docker/docker/client/checkpoint_list.go | 4 +-
 .../github.com/docker/docker/client/client.go | 39 +-
 .../{interface.go => client_interfaces.go} | 99 +-
 .../docker/docker/client/client_unix.go | 2 +-
 .../docker/docker/client/client_windows.go | 2 +-
 .../docker/docker/client/config_create.go | 9 +-
 .../docker/docker/client/config_inspect.go | 9 +-
 .../docker/docker/client/config_list.go | 7 +-
 .../docker/docker/client/config_remove.go | 6 +-
 .../docker/docker/client/config_update.go | 6 +-
 .../docker/docker/client/container_attach.go | 11 +-
 .../docker/docker/client/container_commit.go | 22 +-
 .../docker/docker/client/container_copy.go | 35 +-
 .../docker/docker/client/container_create.go | 74 +-
 .../docker/docker/client/container_diff.go | 19 +-
 .../docker/docker/client/container_exec.go | 23 +-
 .../docker/docker/client/container_export.go | 11 +-
 .../docker/docker/client/container_inspect.go | 42 +-
 .../docker/docker/client/container_kill.go | 7 +-
 .../docker/docker/client/container_list.go | 9 +-
 .../docker/docker/client/container_logs.go | 13 +-
 .../docker/docker/client/container_pause.go | 7 +-
 .../docker/docker/client/container_prune.go | 19 +-
 .../docker/docker/client/container_remove.go | 7 +-
 .../docker/docker/client/container_rename.go | 7 +-
 .../docker/docker/client/container_resize.go | 15 +-
 .../docker/docker/client/container_restart.go | 7 +-
 .../docker/docker/client/container_start.go | 11 +-
 .../docker/docker/client/container_stats.go | 20 +-
 .../docker/docker/client/container_stop.go | 7 +-
 .../docker/docker/client/container_top.go | 15 +-
 .../docker/docker/client/container_unpause.go | 7 +-
 .../docker/docker/client/container_update.go | 19 +-
 .../docker/docker/client/container_wait.go | 13 +-
 .../docker/docker/client/disk_usage.go | 8 +-
 .../docker/client/distribution_inspect.go | 14 +-
 .../docker/docker/client/envvars.go | 2 +-
 .../github.com/docker/docker/client/errors.go | 64 +-
 .../github.com/docker/docker/client/events.go | 6 +-
 .../github.com/docker/docker/client/hijack.go | 26 +-
 .../docker/docker/client/image_build.go | 140 +-
 .../docker/docker/client/image_create.go | 8 +-
 .../docker/docker/client/image_history.go | 48 +-
 .../docker/client/image_history_opts.go | 19 +
 .../docker/docker/client/image_import.go | 20 +-
 .../docker/docker/client/image_inspect.go | 74 +-
 .../docker/client/image_inspect_opts.go | 62 +
 .../docker/docker/client/image_list.go | 8 +-
 .../docker/docker/client/image_load.go | 41 +-
 .../docker/docker/client/image_load_opts.go | 41 +
 .../docker/docker/client/image_prune.go | 19 +-
 .../docker/docker/client/image_pull.go | 10 +-
 .../docker/docker/client/image_push.go | 15 +-
 .../docker/docker/client/image_remove.go | 16 +-
 .../docker/docker/client/image_save.go | 28 +-
 .../docker/docker/client/image_save_opts.go | 33 +
 .../docker/docker/client/image_search.go | 10 +-
 .../docker/docker/client/image_tag.go | 4 +-
 .../github.com/docker/docker/client/info.go | 8 +-
 .../docker/docker/client/interface_stable.go | 10 -
 .../github.com/docker/docker/client/login.go | 4 +-
 .../docker/docker/client/network_connect.go | 12 +-
 .../docker/docker/client/network_create.go | 15 +-
 .../docker/client/network_disconnect.go | 12 +-
 .../docker/docker/client/network_inspect.go | 9 +-
 .../docker/docker/client/network_list.go | 4 +-
 .../docker/docker/client/network_prune.go | 19 +-
 .../docker/docker/client/network_remove.go | 6 +-
 .../docker/docker/client/node_inspect.go | 13 +-
 .../docker/docker/client/node_list.go | 7 +-
 .../docker/docker/client/node_remove.go | 11 +-
 .../docker/docker/client/node_update.go | 7 +-
 .../docker/docker/client/options.go | 13 +-
 .../github.com/docker/docker/client/ping.go | 53 +-
 .../docker/docker/client/plugin_create.go | 2 +-
 .../docker/docker/client/plugin_disable.go | 6 +-
 .../docker/docker/client/plugin_enable.go | 6 +-
 .../docker/docker/client/plugin_inspect.go | 9 +-
 .../docker/docker/client/plugin_install.go | 28 +-
 .../docker/docker/client/plugin_list.go | 4 +-
 .../docker/docker/client/plugin_push.go | 8 +-
 .../docker/docker/client/plugin_remove.go | 7 +-
 .../docker/docker/client/plugin_set.go | 7 +-
 .../docker/docker/client/plugin_upgrade.go | 13 +-
 .../docker/docker/client/request.go | 156 +-
 .../docker/docker/client/secret_create.go | 13 +-
 .../docker/docker/client/secret_inspect.go | 11 +-
 .../docker/docker/client/secret_list.go | 7 +-
 .../docker/docker/client/secret_remove.go | 6 +-
 .../docker/docker/client/secret_update.go | 6 +-
 .../docker/docker/client/service_create.go | 27 +-
 .../docker/docker/client/service_inspect.go | 17 +-
 .../docker/docker/client/service_list.go | 7 +-
 .../docker/docker/client/service_logs.go | 9 +-
 .../docker/docker/client/service_remove.go | 7 +-
 .../docker/docker/client/service_update.go | 19 +-
 .../docker/client/swarm_get_unlock_key.go | 16 +-
 .../docker/docker/client/swarm_init.go | 8 +-
 .../docker/docker/client/swarm_inspect.go | 8 +-
 .../docker/docker/client/swarm_join.go | 2 +-
 .../docker/docker/client/swarm_leave.go | 2 +-
 .../docker/docker/client/swarm_unlock.go | 6 +-
 .../docker/docker/client/swarm_update.go | 2 +-
 .../docker/docker/client/task_inspect.go | 14 +-
 .../docker/docker/client/task_list.go | 7 +-
 .../docker/docker/client/task_logs.go | 4 +-
 .../github.com/docker/docker/client/utils.go | 68 +-
 .../docker/docker/client/version.go | 4 +-
 .../docker/docker/client/volume_create.go | 9 +-
 .../docker/docker/client/volume_inspect.go | 16 +-
 .../docker/docker/client/volume_list.go | 10 +-
 .../docker/docker/client/volume_prune.go | 19 +-
 .../docker/docker/client/volume_remove.go | 7 +-
 .../docker/docker/client/volume_update.go | 6 +-
 .../github.com/docker/docker/errdefs/defs.go | 69 -
 .../github.com/docker/docker/errdefs/doc.go | 8 -
 .../docker/docker/errdefs/helpers.go | 279 -
 .../docker/docker/errdefs/http_helpers.go | 46 -
 vendor/github.com/docker/docker/errdefs/is.go | 123 -
 .../docker/internal/lazyregexp/lazyregexp.go | 90 +
 .../docker/internal/multierror/multierror.go | 2 +-
 .../go-jose/go-jose/v4/CONTRIBUTING.md | 6 -
 .../github.com/go-jose/go-jose/v4/README.md | 10 +-
 vendor/github.com/go-jose/go-jose/v4/jwe.go | 5 +-
 vendor/github.com/go-jose/go-jose/v4/jwk.go | 4 +-
 vendor/github.com/go-jose/go-jose/v4/jws.go | 5 +-
 vendor/github.com/go-logr/logr/.golangci.yaml | 16 +-
 vendor/github.com/go-logr/logr/funcr/funcr.go | 8 +-
 .../go-openapi/analysis/.codecov.yml | 5 -
 .../go-openapi/analysis/.gitattributes | 2 -
 .../github.com/go-openapi/analysis/.gitignore | 5 -
 .../go-openapi/analysis/.golangci.yml | 61 -
 .../go-openapi/analysis/CODE_OF_CONDUCT.md | 74 -
 .../github.com/go-openapi/analysis/README.md | 27 -
 .../go-openapi/analysis/analyzer.go | 1064 --
 .../github.com/go-openapi/analysis/debug.go | 23 -
 vendor/github.com/go-openapi/analysis/doc.go | 43 -
 .../github.com/go-openapi/analysis/fixer.go | 79 -
 .../github.com/go-openapi/analysis/flatten.go | 814 -
 .../go-openapi/analysis/flatten_name.go | 308 -
 .../go-openapi/analysis/flatten_options.go | 79 -
 .../analysis/internal/debug/debug.go | 41 -
 .../internal/flatten/normalize/normalize.go | 87 -
 .../internal/flatten/operations/operations.go | 90 -
 .../internal/flatten/replace/replace.go | 458 -
 .../flatten/schutils/flatten_schema.go | 29 -
 .../analysis/internal/flatten/sortref/keys.go | 201 -
 .../internal/flatten/sortref/sort_ref.go | 141 -
 .../github.com/go-openapi/analysis/mixin.go | 515 -
 .../github.com/go-openapi/analysis/schema.go | 256 -
 .../go-openapi/errors/.gitattributes | 1 -
 .../github.com/go-openapi/errors/.gitignore | 2 -
 .../go-openapi/errors/.golangci.yml | 62 -
 .../go-openapi/errors/CODE_OF_CONDUCT.md | 74 -
 vendor/github.com/go-openapi/errors/README.md | 8 -
 vendor/github.com/go-openapi/errors/api.go | 192 -
 vendor/github.com/go-openapi/errors/auth.go | 22 -
 vendor/github.com/go-openapi/errors/doc.go | 26 -
 .../github.com/go-openapi/errors/headers.go | 103 -
 .../go-openapi/errors/middleware.go | 50 -
 .../github.com/go-openapi/errors/parsing.go | 78 -
 vendor/github.com/go-openapi/errors/schema.go | 615 -
 .../go-openapi/jsonpointer/.editorconfig | 26 -
 .../go-openapi/jsonpointer/.gitignore | 1 -
 .../go-openapi/jsonpointer/.golangci.yml | 61 -
 .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 -
 .../github.com/go-openapi/jsonpointer/LICENSE | 202 -
 .../go-openapi/jsonpointer/README.md | 19 -
 .../go-openapi/jsonpointer/pointer.go | 531 -
 .../go-openapi/jsonreference/.gitignore | 1 -
 .../go-openapi/jsonreference/.golangci.yml | 61 -
 .../jsonreference/CODE_OF_CONDUCT.md | 74 -
 .../go-openapi/jsonreference/LICENSE | 202 -
 .../go-openapi/jsonreference/README.md | 19 -
 .../jsonreference/internal/normalize_url.go | 69 -
 .../go-openapi/jsonreference/reference.go | 158 -
 .../github.com/go-openapi/loads/.editorconfig | 26 -
 vendor/github.com/go-openapi/loads/.gitignore | 4 -
 .../github.com/go-openapi/loads/.golangci.yml | 61 -
 .../github.com/go-openapi/loads/.travis.yml | 25 -
 .../go-openapi/loads/CODE_OF_CONDUCT.md | 74 -
 vendor/github.com/go-openapi/loads/LICENSE | 202 -
 vendor/github.com/go-openapi/loads/README.md | 6 -
 vendor/github.com/go-openapi/loads/doc.go | 18 -
 vendor/github.com/go-openapi/loads/loaders.go | 133 -
 vendor/github.com/go-openapi/loads/options.go | 61 -
 vendor/github.com/go-openapi/loads/spec.go | 275 -
 .../go-openapi/runtime/.editorconfig | 26 -
 .../go-openapi/runtime/.gitattributes | 1 -
 .../github.com/go-openapi/runtime/.gitignore | 5 -
 .../go-openapi/runtime/.golangci.yml | 62 -
 .../go-openapi/runtime/CODE_OF_CONDUCT.md | 74 -
 vendor/github.com/go-openapi/runtime/LICENSE | 202 -
 .../github.com/go-openapi/runtime/README.md | 10 -
 .../go-openapi/runtime/bytestream.go | 222 -
 .../go-openapi/runtime/client_auth_info.go | 30 -
 .../go-openapi/runtime/client_operation.go | 41 -
 .../go-openapi/runtime/client_request.go | 152 -
 .../go-openapi/runtime/client_response.go | 110 -
 .../go-openapi/runtime/constants.go | 49 -
 vendor/github.com/go-openapi/runtime/csv.go | 350 -
 .../go-openapi/runtime/csv_options.go | 121 -
 .../github.com/go-openapi/runtime/discard.go | 9 -
 vendor/github.com/go-openapi/runtime/file.go | 19 -
 .../github.com/go-openapi/runtime/headers.go | 45 -
 .../go-openapi/runtime/interfaces.go | 112 -
 vendor/github.com/go-openapi/runtime/json.go | 38 -
 .../github.com/go-openapi/runtime/request.go | 149 -
 .../github.com/go-openapi/runtime/statuses.go | 90 -
 vendor/github.com/go-openapi/runtime/text.go | 116 -
 .../github.com/go-openapi/runtime/values.go | 19 -
 vendor/github.com/go-openapi/runtime/xml.go | 36 -
 .../github.com/go-openapi/spec/.editorconfig | 26 -
 vendor/github.com/go-openapi/spec/.gitignore | 1 -
 .../github.com/go-openapi/spec/.golangci.yml | 61 -
 .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 -
 vendor/github.com/go-openapi/spec/LICENSE | 202 -
 vendor/github.com/go-openapi/spec/README.md | 54 -
 vendor/github.com/go-openapi/spec/cache.go | 98 -
 .../go-openapi/spec/contact_info.go | 57 -
 vendor/github.com/go-openapi/spec/debug.go | 49 -
 vendor/github.com/go-openapi/spec/embed.go | 17 -
 vendor/github.com/go-openapi/spec/errors.go | 19 -
 vendor/github.com/go-openapi/spec/expander.go | 607 -
 .../go-openapi/spec/external_docs.go | 24 -
 vendor/github.com/go-openapi/spec/header.go | 203 -
 vendor/github.com/go-openapi/spec/info.go | 184 -
 vendor/github.com/go-openapi/spec/items.go | 234 -
 vendor/github.com/go-openapi/spec/license.go | 56 -
 .../github.com/go-openapi/spec/normalizer.go | 202 -
 .../go-openapi/spec/normalizer_nonwindows.go | 44 -
 .../go-openapi/spec/normalizer_windows.go | 154 -
 .../github.com/go-openapi/spec/operation.go | 400 -
 .../github.com/go-openapi/spec/parameter.go | 326 -
 .../github.com/go-openapi/spec/path_item.go | 87 -
 vendor/github.com/go-openapi/spec/paths.go | 97 -
 .../github.com/go-openapi/spec/properties.go | 91 -
 vendor/github.com/go-openapi/spec/ref.go | 193 -
 vendor/github.com/go-openapi/spec/resolver.go | 127 -
 vendor/github.com/go-openapi/spec/response.go | 152 -
 .../github.com/go-openapi/spec/responses.go | 140 -
 vendor/github.com/go-openapi/spec/schema.go | 645 -
 .../go-openapi/spec/schema_loader.go | 331 -
 .../spec/schemas/jsonschema-draft-04.json | 149 -
 .../go-openapi/spec/schemas/v2/schema.json | 1607 --
 .../go-openapi/spec/security_scheme.go | 170 -
 vendor/github.com/go-openapi/spec/spec.go | 78 -
 vendor/github.com/go-openapi/spec/swagger.go | 448 -
 vendor/github.com/go-openapi/spec/tag.go | 75 -
 vendor/github.com/go-openapi/spec/url_go19.go | 11 -
 .../github.com/go-openapi/spec/validations.go | 215 -
 .../github.com/go-openapi/spec/xml_object.go | 68 -
 .../go-openapi/strfmt/.editorconfig | 26 -
 .../go-openapi/strfmt/.gitattributes | 2 -
 .../github.com/go-openapi/strfmt/.gitignore | 2 -
 .../go-openapi/strfmt/.golangci.yml | 61 -
 .../go-openapi/strfmt/CODE_OF_CONDUCT.md | 74 -
 vendor/github.com/go-openapi/strfmt/LICENSE | 202 -
 vendor/github.com/go-openapi/strfmt/README.md | 87 -
 vendor/github.com/go-openapi/strfmt/bson.go | 165 -
 vendor/github.com/go-openapi/strfmt/date.go | 187 -
 .../github.com/go-openapi/strfmt/default.go | 2051 ---
 vendor/github.com/go-openapi/strfmt/doc.go | 18 -
 .../github.com/go-openapi/strfmt/duration.go | 211 -
 vendor/github.com/go-openapi/strfmt/format.go | 327 -
 vendor/github.com/go-openapi/strfmt/time.go | 321 -
 vendor/github.com/go-openapi/strfmt/ulid.go | 230 -
 .../github.com/go-openapi/swag/.editorconfig | 26 -
 .../github.com/go-openapi/swag/.gitattributes | 2 -
 vendor/github.com/go-openapi/swag/.gitignore | 5 -
 .../github.com/go-openapi/swag/.golangci.yml | 60 -
 .../github.com/go-openapi/swag/BENCHMARK.md | 52 -
 .../go-openapi/swag/CODE_OF_CONDUCT.md | 74 -
 vendor/github.com/go-openapi/swag/LICENSE | 202 -
 vendor/github.com/go-openapi/swag/README.md | 23 -
 vendor/github.com/go-openapi/swag/convert.go | 208 -
 .../go-openapi/swag/convert_types.go | 730 -
 vendor/github.com/go-openapi/swag/doc.go | 31 -
 vendor/github.com/go-openapi/swag/file.go | 33 -
 .../go-openapi/swag/initialism_index.go | 202 -
 vendor/github.com/go-openapi/swag/json.go | 312 -
 vendor/github.com/go-openapi/swag/loading.go | 176 -
 .../github.com/go-openapi/swag/name_lexem.go | 93 -
 vendor/github.com/go-openapi/swag/net.go | 38 -
 vendor/github.com/go-openapi/swag/path.go | 59 -
 vendor/github.com/go-openapi/swag/split.go | 508 -
 .../go-openapi/swag/string_bytes.go | 8 -
 vendor/github.com/go-openapi/swag/util.go | 364 -
 vendor/github.com/go-openapi/swag/yaml.go | 481 -
 .../go-openapi/validate/.editorconfig | 26 -
 .../go-openapi/validate/.gitattributes | 2 -
 .../github.com/go-openapi/validate/.gitignore | 5 -
 .../go-openapi/validate/.golangci.yml | 61 -
 .../go-openapi/validate/BENCHMARK.md | 31 -
 .../go-openapi/validate/CODE_OF_CONDUCT.md | 74 -
 vendor/github.com/go-openapi/validate/LICENSE | 202 -
 .../github.com/go-openapi/validate/README.md | 36 -
 .../github.com/go-openapi/validate/context.go | 56 -
 .../github.com/go-openapi/validate/debug.go | 47 -
 .../go-openapi/validate/default_validator.go | 304 -
 vendor/github.com/go-openapi/validate/doc.go | 87 -
 .../go-openapi/validate/example_validator.go | 299 -
 .../github.com/go-openapi/validate/formats.go | 99 -
 .../github.com/go-openapi/validate/helpers.go | 333 -
 .../go-openapi/validate/object_validator.go | 431 -
 .../github.com/go-openapi/validate/options.go | 62 -
 .../github.com/go-openapi/validate/pools.go | 366 -
 .../go-openapi/validate/pools_debug.go | 1012 --
 .../github.com/go-openapi/validate/result.go | 563 -
 vendor/github.com/go-openapi/validate/rexp.go | 71 -
 .../github.com/go-openapi/validate/schema.go | 354 -
 .../go-openapi/validate/schema_messages.go | 78 -
 .../go-openapi/validate/schema_option.go | 83 -
 .../go-openapi/validate/schema_props.go | 356 -
 .../go-openapi/validate/slice_validator.go | 150 -
 vendor/github.com/go-openapi/validate/spec.go | 852 -
 .../go-openapi/validate/spec_messages.go | 366 -
 vendor/github.com/go-openapi/validate/type.go | 213 -
 .../go-openapi/validate/update-fixtures.sh | 15 -
 .../go-openapi/validate/validator.go | 1051 --
 .../github.com/go-openapi/validate/values.go | 450 -
 .../go-containerregistry/pkg/name/ref.go | 2 +-
 vendor/github.com/josharian/intern/README.md | 5 -
 vendor/github.com/josharian/intern/intern.go | 44 -
 vendor/github.com/josharian/intern/license.md | 21 -
 .../github.com/klauspost/compress/README.md | 140 +-
 .../klauspost/compress/flate/fast_encoder.go | 63 +-
 .../compress/flate/huffman_bit_writer.go | 19 +-
 .../klauspost/compress/flate/level1.go | 48 +-
 .../klauspost/compress/flate/level2.go | 2 +-
 .../klauspost/compress/flate/level3.go | 2 +-
 .../klauspost/compress/flate/level4.go | 10 +-
 .../klauspost/compress/flate/level5.go | 40 +-
 .../klauspost/compress/flate/level6.go | 32 +-
 .../compress/flate/matchlen_amd64.go | 16 -
 .../klauspost/compress/flate/matchlen_amd64.s | 66 -
 .../compress/flate/matchlen_generic.go | 15 +-
 .../klauspost/compress/flate/stateless.go | 13 +-
 .../klauspost/compress/huff0/bitreader.go | 25 +-
 .../klauspost/compress/internal/le/le.go | 5 +
 .../compress/internal/le/unsafe_disabled.go | 42 +
 .../compress/internal/le/unsafe_enabled.go | 55 +
 .../klauspost/compress/s2/README.md | 2 +-
 .../klauspost/compress/s2/decode_other.go | 26 +-
 .../klauspost/compress/s2/encode_all.go | 422 +-
 .../klauspost/compress/s2/encode_better.go | 416 +-
 .../klauspost/compress/s2/encode_go.go | 12 +
 vendor/github.com/klauspost/compress/s2sx.mod | 3 +-
 .../klauspost/compress/zip/writer.go | 15 +-
 .../klauspost/compress/zstd/README.md | 2 +-
 .../klauspost/compress/zstd/bitreader.go | 37 +-
 .../klauspost/compress/zstd/blockdec.go | 19 -
 .../klauspost/compress/zstd/blockenc.go | 27 +-
 .../klauspost/compress/zstd/decoder.go | 3 +-
 .../klauspost/compress/zstd/enc_base.go | 2 +-
 .../compress/zstd/matchlen_generic.go | 11 +-
 .../klauspost/compress/zstd/seqdec.go | 2 +-
 .../klauspost/compress/zstd/seqdec_amd64.s | 64 +-
 .../klauspost/compress/zstd/seqdec_generic.go | 2 +-
 .../klauspost/compress/zstd/seqenc.go | 2 -
 .../klauspost/compress/zstd/snappy.go | 4 +-
 .../klauspost/compress/zstd/zstd.go | 7 +-
 vendor/github.com/mailru/easyjson/LICENSE | 7 -
 .../github.com/mailru/easyjson/buffer/pool.go | 278 -
 .../mailru/easyjson/jlexer/bytestostr.go | 24 -
 .../easyjson/jlexer/bytestostr_nounsafe.go | 13 -
 .../mailru/easyjson/jlexer/error.go | 15 -
 .../mailru/easyjson/jlexer/lexer.go | 1244 --
 .../mailru/easyjson/jwriter/writer.go | 405 -
 vendor/github.com/mattn/go-sqlite3/README.md | 9 +-
 .../github.com/mattn/go-sqlite3/callback.go | 3 +-
 .../mattn/go-sqlite3/sqlite3-binding.c | 13600 ++++++++++------
 .../mattn/go-sqlite3/sqlite3-binding.h | 494 +-
 vendor/github.com/mattn/go-sqlite3/sqlite3.go | 80 +-
 .../go-sqlite3/sqlite3_opt_unlock_notify.c | 4 +
 .../go-sqlite3/sqlite3_opt_unlock_notify.go | 4 +
 .../mitchellh/mapstructure/CHANGELOG.md | 96 -
 .../github.com/mitchellh/mapstructure/LICENSE | 21 -
 .../mitchellh/mapstructure/README.md | 46 -
 .../mitchellh/mapstructure/decode_hooks.go | 279 -
 .../mitchellh/mapstructure/error.go | 50 -
 .../mitchellh/mapstructure/mapstructure.go | 1540 --
 .../moby/sys/capability/.codespellrc | 3 -
 .../moby/sys/capability/.golangci.yml | 6 -
 .../moby/sys/capability/CHANGELOG.md | 38 +-
 .../moby/sys/capability/capability.go | 50 +-
 .../moby/sys/capability/capability_linux.go | 100 +-
 .../moby/sys/capability/capability_noop.go | 20 +
 vendor/github.com/moby/sys/capability/enum.go | 2 +-
 .../moby/sys/capability/syscall_linux.go | 20 +-
 vendor/github.com/moby/sys/user/idtools.go | 141 +
 .../github.com/moby/sys/user/idtools_unix.go | 143 +
 .../moby/sys/user/idtools_windows.go | 13 +
 vendor/github.com/oklog/ulid/.gitignore | 29 -
 vendor/github.com/oklog/ulid/.travis.yml | 16 -
 vendor/github.com/oklog/ulid/AUTHORS.md | 2 -
 vendor/github.com/oklog/ulid/CHANGELOG.md | 33 -
 vendor/github.com/oklog/ulid/CONTRIBUTING.md | 17 -
 vendor/github.com/oklog/ulid/Gopkg.lock | 15 -
 vendor/github.com/oklog/ulid/Gopkg.toml | 26 -
 vendor/github.com/oklog/ulid/README.md | 150 -
 vendor/github.com/oklog/ulid/ulid.go | 614 -
 .../runtime-spec/specs-go/config.go | 68 +-
 .../runtime-spec/specs-go/version.go | 2 +-
 .../selinux/go-selinux/label/label.go | 67 -
 .../selinux/go-selinux/label/label_linux.go | 11 -
 .../selinux/go-selinux/label/label_stub.go | 6 -
 .../selinux/go-selinux/selinux.go | 16 +-
 .../selinux/go-selinux/selinux_linux.go | 41 +-
 vendor/github.com/ostreedev/ostree-go/LICENSE | 17 -
 .../ostree-go/pkg/glibobject/gboolean.go | 60 -
 .../ostree-go/pkg/glibobject/gcancellable.go | 47 -
 .../ostree-go/pkg/glibobject/gerror.go | 71 -
 .../ostree-go/pkg/glibobject/gfile.go | 52 -
 .../ostree-go/pkg/glibobject/gfileinfo.go | 53 -
 .../ostree-go/pkg/glibobject/ghashtable.go | 50 -
 .../pkg/glibobject/ghashtableiter.go | 50 -
 .../ostree-go/pkg/glibobject/glibobject.go | 27 -
 .../ostree-go/pkg/glibobject/glibobject.go.h | 17 -
 .../ostree-go/pkg/glibobject/gobject.go | 79 -
 .../pkg/glibobject/goptioncontext.go | 51 -
 .../ostree-go/pkg/glibobject/gvariant.go | 100 -
 .../ostree-go/pkg/otbuiltin/builtin.go | 119 -
 .../ostree-go/pkg/otbuiltin/builtin.go.h | 179 -
 .../ostree-go/pkg/otbuiltin/checkout.go | 111 -
 .../ostree-go/pkg/otbuiltin/commit.go | 483 -
 .../ostreedev/ostree-go/pkg/otbuiltin/init.go | 84 -
 .../ostreedev/ostree-go/pkg/otbuiltin/log.go | 163 -
 .../ostree-go/pkg/otbuiltin/prune.go | 217 -
 vendor/github.com/proglottis/gpgme/.gitignore | 2 +
 .../github.com/proglottis/gpgme/callbacks.go | 42 -
 vendor/github.com/proglottis/gpgme/data.go | 119 +-
 vendor/github.com/proglottis/gpgme/go_gpgme.c | 20 +-
 vendor/github.com/proglottis/gpgme/go_gpgme.h | 4 +-
 vendor/github.com/proglottis/gpgme/gpgme.go | 78 +-
 .../proglottis/gpgme/unset_agent_info.go | 1 +
 .../{rekor => protobuf-specs}/COPYRIGHT.txt | 2 +-
 .../protobuf-specs}/LICENSE | 0
 .../gen/pb-go/common/v1/sigstore_common.pb.go | 1273 ++
 .../github.com/sigstore/rekor/CONTRIBUTORS.md | 116 -
 vendor/github.com/sigstore/rekor/LICENSE | 202 -
 .../rekor/pkg/generated/models/alpine.go | 210 -
 .../pkg/generated/models/alpine_schema.go | 29 -
 .../generated/models/alpine_v001_schema.go | 455 -
 .../pkg/generated/models/consistency_proof.go | 118 -
 .../rekor/pkg/generated/models/cose.go | 210 -
 .../rekor/pkg/generated/models/cose_schema.go | 29 -
 .../pkg/generated/models/cose_v001_schema.go | 521 -
 .../rekor/pkg/generated/models/dsse.go | 210 -
 .../rekor/pkg/generated/models/dsse_schema.go | 29 -
 .../pkg/generated/models/dsse_v001_schema.go | 685 -
 .../rekor/pkg/generated/models/error.go | 69 -
 .../pkg/generated/models/hashedrekord.go | 210 -
 .../generated/models/hashedrekord_schema.go | 29 -
 .../models/hashedrekord_v001_schema.go | 519 -
 .../rekor/pkg/generated/models/helm.go | 210 -
 .../rekor/pkg/generated/models/helm_schema.go | 29 -
 .../pkg/generated/models/helm_v001_schema.go | 662 -
 .../models/inactive_shard_log_info.go | 153 -
 .../pkg/generated/models/inclusion_proof.go | 179 -
 .../rekor/pkg/generated/models/intoto.go | 210 -
 .../pkg/generated/models/intoto_schema.go | 29 -
 .../generated/models/intoto_v001_schema.go | 514 -
 .../generated/models/intoto_v002_schema.go | 757 -
 .../rekor/pkg/generated/models/jar.go | 210 -
 .../rekor/pkg/generated/models/jar_schema.go | 29 -
 .../pkg/generated/models/jar_v001_schema.go | 569 -
 .../rekor/pkg/generated/models/log_entry.go | 445 -
 .../rekor/pkg/generated/models/log_info.go | 221 -
 .../pkg/generated/models/proposed_entry.go | 195 -
 .../rekor/pkg/generated/models/rekord.go | 210 -
 .../pkg/generated/models/rekord_schema.go | 29 -
 .../generated/models/rekord_v001_schema.go | 611 -
 .../rekor/pkg/generated/models/rfc3161.go | 210 -
 .../pkg/generated/models/rfc3161_schema.go | 29 -
 .../generated/models/rfc3161_v001_schema.go | 183 -
 .../rekor/pkg/generated/models/rpm.go | 210 -
 .../rekor/pkg/generated/models/rpm_schema.go | 29 -
 .../pkg/generated/models/rpm_v001_schema.go | 450 -
 .../pkg/generated/models/search_index.go | 341 -
 .../pkg/generated/models/search_log_query.go | 297 -
 .../rekor/pkg/generated/models/tuf.go | 210 -
 .../rekor/pkg/generated/models/tuf_schema.go | 29 -
 .../pkg/generated/models/tuf_v001_schema.go | 304 -
 .../pkg/signature/algorithm_registry.go | 314 +
 .../sigstore/sigstore/pkg/signature/ecdsa.go | 2 +-
 .../sigstore/sigstore/pkg/signature/signer.go | 22 +
 .../sigstore/pkg/signature/signerverifier.go | 22 +
 .../sigstore/sigstore/pkg/signature/util.go | 19 +
 .../sigstore/pkg/signature/verifier.go | 18 +
 .../smallstep}/pkcs7/.gitignore | 4 +
 .../smallstep}/pkcs7/LICENSE | 0
 .../smallstep}/pkcs7/Makefile | 0
 vendor/github.com/smallstep/pkcs7/README.md | 63 +
 .../smallstep}/pkcs7/ber.go | 21 +-
 .../smallstep}/pkcs7/decrypt.go | 70 +-
 .../smallstep}/pkcs7/encrypt.go | 102 +-
 .../pkcs7/internal/legacy/x509/debug.go | 14 +
 .../pkcs7/internal/legacy/x509/doc.go | 14 +
 .../pkcs7/internal/legacy/x509/oid.go | 377 +
 .../pkcs7/internal/legacy/x509/parser.go | 1027 ++
 .../pkcs7/internal/legacy/x509/pkcs1.go | 15 +
 .../pkcs7/internal/legacy/x509/verify.go | 193 +
 .../pkcs7/internal/legacy/x509/x509.go | 488 +
 .../smallstep}/pkcs7/pkcs7.go | 95 +-
 .../smallstep}/pkcs7/sign.go | 0
 .../smallstep}/pkcs7/verify.go | 50 +-
 .../sylabs/sif/v2/pkg/sif/create.go | 12 +-
 .../tchap/go-patricia/v2/patricia/patricia.go | 2 +-
 .../vbatts/tar-split/archive/tar/format.go | 4 +
 .../vbatts/tar-split/archive/tar/reader.go | 14 +-
 .../vbatts/tar-split/archive/tar/writer.go | 3 +
 vendor/github.com/vbauerster/mpb/v8/README.md | 10 +-
 vendor/github.com/vbauerster/mpb/v8/bar.go | 82 +-
 .../vbauerster/mpb/v8/bar_filler_bar.go | 134 +-
 .../vbauerster/mpb/v8/bar_filler_spinner.go | 18 +-
 .../vbauerster/mpb/v8/bar_option.go | 47 +-
 .../vbauerster/mpb/v8/container_option.go | 11 +
 .../github.com/vbauerster/mpb/v8/decor/eta.go | 2 +-
 .../vbauerster/mpb/v8/decor/size_type.go | 42 +-
 .../vbauerster/mpb/v8/heap_manager.go | 48 +-
 .../vbauerster/mpb/v8/priority_queue.go | 22 +-
 .../github.com/vbauerster/mpb/v8/progress.go | 118 +-
 vendor/go.mongodb.org/mongo-driver/LICENSE | 201 -
 .../go.mongodb.org/mongo-driver/bson/bson.go | 50 -
 .../bson/bsoncodec/array_codec.go | 55 -
 .../mongo-driver/bson/bsoncodec/bsoncodec.go | 382 -
 .../bson/bsoncodec/byte_slice_codec.go | 138 -
 .../bson/bsoncodec/codec_cache.go | 166 -
 .../bson/bsoncodec/cond_addr_codec.go | 63 -
 .../bson/bsoncodec/default_value_decoders.go | 1807 --
 .../bson/bsoncodec/default_value_encoders.go | 856 -
 .../mongo-driver/bson/bsoncodec/doc.go | 95 -
 .../bson/bsoncodec/empty_interface_codec.go | 173 -
 .../mongo-driver/bson/bsoncodec/map_codec.go | 343 -
 .../mongo-driver/bson/bsoncodec/mode.go | 65 -
 .../bson/bsoncodec/pointer_codec.go | 108 -
 .../mongo-driver/bson/bsoncodec/proxy.go | 14 -
 .../mongo-driver/bson/bsoncodec/registry.go | 524 -
 .../bson/bsoncodec/slice_codec.go | 214 -
 .../bson/bsoncodec/string_codec.go | 140 -
 .../bson/bsoncodec/struct_codec.go | 736 -
 .../bson/bsoncodec/struct_tag_parser.go | 148 -
 .../mongo-driver/bson/bsoncodec/time_codec.go | 151 -
 .../mongo-driver/bson/bsoncodec/types.go | 58 -
 .../mongo-driver/bson/bsoncodec/uint_codec.go | 198 -
 .../bsonoptions/byte_slice_codec_options.go | 49 -
 .../mongo-driver/bson/bsonoptions/doc.go | 8 -
 .../empty_interface_codec_options.go | 49 -
 .../bson/bsonoptions/map_codec_options.go | 82 -
 .../bson/bsonoptions/slice_codec_options.go | 49 -
 .../bson/bsonoptions/string_codec_options.go | 52 -
 .../bson/bsonoptions/struct_codec_options.go | 107 -
 .../bson/bsonoptions/time_codec_options.go | 49 -
 .../bson/bsonoptions/uint_codec_options.go | 49 -
 .../mongo-driver/bson/bsonrw/copier.go | 489 -
 .../mongo-driver/bson/bsonrw/doc.go | 9 -
 .../bson/bsonrw/extjson_parser.go | 806 -
 .../bson/bsonrw/extjson_reader.go | 653 -
 .../bson/bsonrw/extjson_tables.go | 223 -
 .../bson/bsonrw/extjson_wrappers.go | 492 -
 .../bson/bsonrw/extjson_writer.go | 751 -
 .../mongo-driver/bson/bsonrw/json_scanner.go | 528 -
 .../mongo-driver/bson/bsonrw/mode.go | 108 -
 .../mongo-driver/bson/bsonrw/reader.go | 65 -
 .../mongo-driver/bson/bsonrw/value_reader.go | 890 -
 .../mongo-driver/bson/bsonrw/value_writer.go | 640 -
 .../mongo-driver/bson/bsonrw/writer.go | 87 -
 .../mongo-driver/bson/bsontype/bsontype.go | 116 -
 .../mongo-driver/bson/decoder.go | 208 -
 .../go.mongodb.org/mongo-driver/bson/doc.go | 139 -
 .../mongo-driver/bson/encoder.go | 199 -
 .../mongo-driver/bson/marshal.go | 453 -
 .../mongo-driver/bson/primitive/decimal.go | 434 -
 .../mongo-driver/bson/primitive/objectid.go | 206 -
 .../mongo-driver/bson/primitive/primitive.go | 231 -
 .../mongo-driver/bson/primitive_codecs.go | 122 -
 .../go.mongodb.org/mongo-driver/bson/raw.go | 101 -
 .../mongo-driver/bson/raw_element.go | 48 -
 .../mongo-driver/bson/raw_value.go | 320 -
 .../mongo-driver/bson/registry.go | 35 -
 .../go.mongodb.org/mongo-driver/bson/types.go | 50 -
 .../mongo-driver/bson/unmarshal.go | 177 -
 .../mongo-driver/x/bsonx/bsoncore/array.go | 164 -
 .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 -
 .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 -
 .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 846 -
 .../mongo-driver/x/bsonx/bsoncore/doc.go | 29 -
 .../mongo-driver/x/bsonx/bsoncore/document.go | 386 -
 .../x/bsonx/bsoncore/document_sequence.go | 189 -
 .../mongo-driver/x/bsonx/bsoncore/element.go | 152 -
 .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 -
 .../mongo-driver/x/bsonx/bsoncore/value.go | 965 --
 vendor/go.mozilla.org/pkcs7/README.md | 69 -
 .../go.mozilla.org/pkcs7/verify_test_dsa.go | 182 -
 .../auto/sdk/CONTRIBUTING.md | 27 +
 .../auto/sdk}/LICENSE | 0
 .../auto/sdk/VERSIONING.md | 15 +
 vendor/go.opentelemetry.io/auto/sdk/doc.go | 14 +
 .../auto/sdk/internal/telemetry/attr.go | 58 +
 .../auto/sdk/internal/telemetry/doc.go | 8 +
 .../auto/sdk/internal/telemetry/id.go | 103 +
 .../auto/sdk/internal/telemetry/number.go | 67 +
 .../auto/sdk/internal/telemetry/resource.go | 66 +
 .../auto/sdk/internal/telemetry/scope.go | 67 +
 .../auto/sdk/internal/telemetry/span.go | 456 +
 .../auto/sdk/internal/telemetry/status.go | 40 +
 .../auto/sdk/internal/telemetry/traces.go | 189 +
 .../auto/sdk/internal/telemetry/value.go | 452 +
 vendor/go.opentelemetry.io/auto/sdk/limit.go | 94 +
 vendor/go.opentelemetry.io/auto/sdk/span.go | 432 +
 vendor/go.opentelemetry.io/auto/sdk/tracer.go | 124 +
 .../auto/sdk/tracer_provider.go | 33 +
 .../net/http/otelhttp/client.go | 6 +-
 .../net/http/otelhttp/common.go | 14 -
 .../net/http/otelhttp/config.go | 15 +-
 .../net/http/otelhttp/handler.go | 139 +-
 .../otelhttp/internal/request/body_wrapper.go | 78 +
 .../net/http/otelhttp/internal/request/gen.go | 10 +
 .../internal/request/resp_writer_wrapper.go | 122 +
 .../net/http/otelhttp/internal/semconv/env.go | 249 +-
 .../net/http/otelhttp/internal/semconv/gen.go | 14 +
 .../otelhttp/internal/semconv/httpconv.go | 539 +
 .../http/otelhttp/internal/semconv/util.go | 39 +-
 .../http/otelhttp/internal/semconv/v1.20.0.go | 211 +-
 .../http/otelhttp/internal/semconv/v1.24.0.go | 197 -
 .../otelhttp/internal/semconvutil/netconv.go | 13 +-
 .../net/http/otelhttp/start_time_context.go | 29 +
 .../net/http/otelhttp/transport.go | 108 +-
 .../net/http/otelhttp/version.go | 2 +-
 .../instrumentation/net/http/otelhttp/wrap.go | 99 -
 vendor/go.opentelemetry.io/otel/.gitignore | 9 +-
 vendor/go.opentelemetry.io/otel/.golangci.yml | 136 +-
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 253 +-
 vendor/go.opentelemetry.io/otel/CODEOWNERS | 6 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 38 +-
 vendor/go.opentelemetry.io/otel/Makefile | 75 +-
 vendor/go.opentelemetry.io/otel/README.md | 45 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md | 29 +-
 vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +-
 .../go.opentelemetry.io/otel/attribute/set.go | 40 +-
 .../otel/baggage/baggage.go | 150 +-
 .../go.opentelemetry.io/otel/codes/codes.go | 5 +-
 .../otel/dependencies.Dockerfile | 3 +
 vendor/go.opentelemetry.io/otel/doc.go | 2 +
 .../otel/internal/attribute/attribute.go | 44 +-
 .../otel/internal/global/instruments.go | 14 +-
 .../otel/internal/global/meter.go | 382 +-
 .../otel/internal/global/trace.go | 33 +-
 .../otel/internal/rawhelpers.go | 12 +-
 .../otel/metric/asyncfloat64.go | 2 +-
 .../otel/metric/asyncint64.go | 2 +-
 .../otel/metric/instrument.go | 2 +-
 .../go.opentelemetry.io/otel/metric/meter.go | 13 +
 .../otel/metric/noop/README.md | 3 +
 .../otel/metric/noop/noop.go | 281 +
 vendor/go.opentelemetry.io/otel/renovate.json | 16 +-
 .../go.opentelemetry.io/otel/requirements.txt | 2 +-
 .../otel/semconv/v1.24.0/README.md | 3 -
 .../otel/semconv/v1.24.0/attribute_group.go | 4387 -----
 .../otel/semconv/v1.24.0/event.go | 200 -
 .../otel/semconv/v1.24.0/resource.go | 2545 ---
 .../otel/semconv/v1.24.0/trace.go | 1323 --
 .../otel/semconv/v1.26.0/README.md | 3 +
 .../otel/semconv/v1.26.0/attribute_group.go | 8996 ++++++++++
 .../otel/semconv/{v1.24.0 => v1.26.0}/doc.go | 4 +-
 .../semconv/{v1.24.0 => v1.26.0}/exception.go | 2 +-
 .../semconv/{v1.24.0 => v1.26.0}/metric.go | 466 +-
 .../semconv/{v1.24.0 => v1.26.0}/schema.go | 4 +-
 vendor/go.opentelemetry.io/otel/trace/auto.go | 661 +
 .../go.opentelemetry.io/otel/trace/config.go | 2 +-
 .../go.opentelemetry.io/otel/trace/context.go | 2 +-
 vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +-
 .../otel/trace/internal/telemetry/attr.go | 58 +
 .../otel/trace/internal/telemetry/doc.go | 8 +
 .../otel/trace/internal/telemetry/id.go | 103 +
 .../otel/trace/internal/telemetry/number.go | 67 +
 .../otel/trace/internal/telemetry/resource.go | 66 +
 .../otel/trace/internal/telemetry/scope.go | 67 +
 .../otel/trace/internal/telemetry/span.go | 460 +
 .../otel/trace/internal/telemetry/status.go | 40 +
 .../otel/trace/internal/telemetry/traces.go | 189 +
 .../otel/trace/internal/telemetry/value.go | 453 +
 vendor/go.opentelemetry.io/otel/trace/noop.go | 20 +-
 .../otel/trace/noop/README.md | 3 +
 .../otel/trace/noop/noop.go | 112 +
 .../otel/trace/provider.go | 59 +
 vendor/go.opentelemetry.io/otel/trace/span.go | 177 +
 .../go.opentelemetry.io/otel/trace/trace.go | 249 -
 .../go.opentelemetry.io/otel/trace/tracer.go | 37 +
 .../otel/trace/tracestate.go | 10 +
 .../otel/verify_examples.sh | 74 -
 .../otel/verify_released_changelog.sh | 42 +
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 18 +-
 vendor/golang.org/x/crypto/cryptobyte/asn1.go | 825 +
 .../x/crypto/cryptobyte/asn1/asn1.go | 46 +
 .../golang.org/x/crypto/cryptobyte/builder.go | 350 +
 .../golang.org/x/crypto/cryptobyte/string.go | 183 +
 .../x/crypto/internal/poly1305/mac_noasm.go | 2 +-
 .../poly1305/{sum_amd64.go => sum_asm.go} | 2 +-
 .../x/crypto/internal/poly1305/sum_loong64.s | 123 +
 .../x/crypto/internal/poly1305/sum_ppc64x.go | 47 -
 vendor/golang.org/x/exp/LICENSE | 27 -
 vendor/golang.org/x/exp/PATENTS | 22 -
 vendor/golang.org/x/exp/maps/maps.go | 94 -
 .../x/net/http2/client_conn_pool.go | 8 +-
 vendor/golang.org/x/net/http2/config.go | 122 +
 vendor/golang.org/x/net/http2/config_go124.go | 61 +
 .../x/net/http2/config_pre_go124.go | 16 +
 vendor/golang.org/x/net/http2/frame.go | 29 +-
 vendor/golang.org/x/net/http2/http2.go | 112 +-
 vendor/golang.org/x/net/http2/server.go | 358 +-
 vendor/golang.org/x/net/http2/transport.go | 833 +-
 vendor/golang.org/x/net/http2/unencrypted.go | 32 +
 vendor/golang.org/x/net/http2/write.go | 13 +-
 .../x/net/internal/httpcommon/ascii.go | 53 +
 .../httpcommon}/headermap.go | 24 +-
 .../x/net/internal/httpcommon/request.go | 467 +
 vendor/golang.org/x/net/trace/events.go | 2 +-
 vendor/golang.org/x/sync/errgroup/errgroup.go | 20 +-
 vendor/golang.org/x/sys/cpu/cpu.go | 11 +
 .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 23 +
 vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 12 +
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 25 +-
 .../x/sys/unix/zerrors_linux_386.go | 1 +
 .../x/sys/unix/zerrors_linux_amd64.go | 1 +
 .../x/sys/unix/zerrors_linux_arm.go | 1 +
 .../x/sys/unix/zerrors_linux_arm64.go | 1 +
 .../x/sys/unix/zerrors_linux_loong64.go | 1 +
 .../x/sys/unix/zerrors_linux_mips.go | 1 +
 .../x/sys/unix/zerrors_linux_mips64.go | 1 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 1 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 1 +
 .../x/sys/unix/zerrors_linux_ppc.go | 1 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 1 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 1 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 1 +
 .../x/sys/unix/zerrors_linux_s390x.go | 1 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 1 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 108 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 16 +
 .../x/sys/unix/ztypes_linux_amd64.go | 16 +
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 16 +
 .../x/sys/unix/ztypes_linux_arm64.go | 16 +
 .../x/sys/unix/ztypes_linux_loong64.go | 16 +
 .../x/sys/unix/ztypes_linux_mips.go | 16 +
 .../x/sys/unix/ztypes_linux_mips64.go | 16 +
 .../x/sys/unix/ztypes_linux_mips64le.go | 16 +
 .../x/sys/unix/ztypes_linux_mipsle.go | 16 +
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 16 +
 .../x/sys/unix/ztypes_linux_ppc64.go | 16 +
 .../x/sys/unix/ztypes_linux_ppc64le.go | 16 +
 .../x/sys/unix/ztypes_linux_riscv64.go | 16 +
 .../x/sys/unix/ztypes_linux_s390x.go | 16 +
 .../x/sys/unix/ztypes_linux_sparc64.go | 16 +
 .../x/sys/windows/security_windows.go | 49 +-
 .../x/sys/windows/syscall_windows.go | 6 +-
 .../golang.org/x/sys/windows/types_windows.go | 212 +
 .../x/sys/windows/zsyscall_windows.go | 9 +
 vendor/golang.org/x/term/terminal.go | 77 +-
 .../genproto/googleapis/api}/LICENSE | 0
 .../api/annotations/annotations.pb.go | 119 +
 .../googleapis/api/annotations/client.pb.go | 2103 +++
 .../api/annotations/field_behavior.pb.go | 266 +
 .../api/annotations/field_info.pb.go | 392 +
 .../googleapis/api/annotations/http.pb.go | 774 +
 .../googleapis/api/annotations/resource.pb.go | 659 +
 .../googleapis/api/annotations/routing.pb.go | 693 +
 .../googleapis/api/launch_stage.pb.go | 203 +
 .../googleapis/rpc/status/status.pb.go | 2 +-
 vendor/google.golang.org/grpc/CONTRIBUTING.md | 16 +-
 .../grpc/balancer/balancer.go | 110 +-
 .../grpc/balancer/base/balancer.go | 14 +-
 .../endpointsharding/endpointsharding.go | 356 +
 .../pickfirst/internal/internal.go} | 25 +-
 .../grpc/balancer/pickfirst/pickfirst.go | 16 +-
 .../pickfirst/pickfirstleaf/pickfirstleaf.go | 927 ++
 .../grpc/balancer/roundrobin/roundrobin.go | 70 +-
 .../grpc/balancer/subconn.go | 134 +
 .../grpc/balancer_wrapper.go | 199 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 296 +-
 vendor/google.golang.org/grpc/clientconn.go | 115 +-
 vendor/google.golang.org/grpc/codec.go | 2 +-
 .../google.golang.org/grpc/credentials/tls.go | 35 +-
 vendor/google.golang.org/grpc/dialoptions.go | 57 +-
 .../grpc/experimental/stats/metricregistry.go | 27 +-
 .../grpc/experimental/stats/metrics.go | 78 +-
 .../grpc/grpclog/internal/loggerv2.go | 107 +-
 .../grpc/internal/backoff/backoff.go | 2 +-
 .../balancer/gracefulswitch/config.go | 2 +
 .../balancer/gracefulswitch/gracefulswitch.go | 10 +-
 .../grpc/internal/channelz/channel.go | 15 +
 .../grpc/internal/channelz/server.go | 2 +
 .../grpc/internal/channelz/socket.go | 7 +
 .../grpc/internal/channelz/subchannel.go | 2 +
 .../grpc/internal/channelz/trace.go | 19 +-
 .../grpc/internal/envconfig/envconfig.go | 21 +-
 .../grpc/internal/envconfig/xds.go | 10 +
 .../internal/grpcsync/callback_serializer.go | 2 +-
 .../grpc/internal/grpcutil/method.go | 2 +-
 .../grpc/internal/idle/idle.go | 4 +-
 .../grpc/internal/internal.go | 70 +-
 .../grpc/internal/metadata/metadata.go | 26 +-
 .../proxyattributes/proxyattributes.go | 54 +
 .../delegatingresolver/delegatingresolver.go | 427 +
 .../internal/resolver/dns/dns_resolver.go | 47 +-
 .../internal/stats/metrics_recorder_list.go | 10 +
 .../grpc/internal/status/status.go | 35 +-
 .../grpc/internal/transport/client_stream.go | 144 +
 .../grpc/internal/transport/flowcontrol.go | 9 +-
 .../grpc/internal/transport/handler_server.go | 38 +-
 .../grpc/internal/transport/http2_client.go | 168 +-
 .../grpc/internal/transport/http2_server.go | 96 +-
 .../grpc/internal/transport/http_util.go | 4 +-
 .../grpc/internal/transport/proxy.go | 62 +-
 .../grpc/internal/transport/server_stream.go | 180 +
 .../grpc/internal/transport/transport.go | 320 +-
 .../grpc/mem/buffer_slice.go | 59 +-
 vendor/google.golang.org/grpc/mem/buffers.go | 32 +-
 .../google.golang.org/grpc/picker_wrapper.go | 2 +-
 vendor/google.golang.org/grpc/preloader.go | 4 +-
 vendor/google.golang.org/grpc/resolver/map.go | 174 +-
 .../grpc/resolver/resolver.go | 25 +-
 .../grpc/resolver_wrapper.go | 36 +-
 vendor/google.golang.org/grpc/rpc_util.go | 122 +-
 vendor/google.golang.org/grpc/server.go | 118 +-
 .../google.golang.org/grpc/service_config.go | 22 +-
 .../google.golang.org/grpc/stats/metrics.go | 81 +
 vendor/google.golang.org/grpc/stats/stats.go | 109 +-
 vendor/google.golang.org/grpc/stream.go | 348 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 .../protobuf/encoding/protojson/decode.go | 7 +-
 .../protobuf/encoding/protojson/encode.go | 4 +-
 .../encoding/protojson/well_known_types.go | 6 +-
 .../protobuf/encoding/prototext/decode.go | 5 -
 .../protobuf/internal/descopts/options.go | 20 +-
 .../editiondefaults/editions_defaults.binpb | Bin 93 -> 146 bytes
 .../internal/editionssupport/editions.go | 7 +-
 .../protobuf/internal/encoding/tag/tag.go | 8 +-
 .../protobuf/internal/errors/is_go112.go | 40 -
 .../protobuf/internal/errors/is_go113.go | 13 -
 .../protobuf/internal/filedesc/desc.go | 31 +-
 .../protobuf/internal/filedesc/desc_init.go | 2 +
 .../protobuf/internal/filedesc/desc_lazy.go | 11 +-
 .../protobuf/internal/filedesc/editions.go | 13 +-
 .../protobuf/internal/filetype/build.go | 2 +-
 .../protobuf/internal/flags/flags.go | 2 +-
 .../protobuf/internal/genid/descriptor_gen.go | 16 +
 .../protobuf/internal/genid/doc.go | 2 +-
 .../internal/genid/go_features_gen.go | 49 +-
 .../protobuf/internal/genid/goname.go | 5 -
 .../protobuf/internal/genid/map_entry.go | 2 +-
 .../protobuf/internal/genid/name.go | 12 +
 .../protobuf/internal/genid/wrappers.go | 2 +-
 .../internal/impl/api_export_opaque.go | 128 +
 .../protobuf/internal/impl/bitmap.go | 34 +
 .../protobuf/internal/impl/bitmap_race.go | 126 +
 .../protobuf/internal/impl/checkinit.go | 33 +
 .../protobuf/internal/impl/codec_extension.go | 11 +-
 .../protobuf/internal/impl/codec_field.go | 78 +-
 .../internal/impl/codec_field_opaque.go | 264 +
 .../protobuf/internal/impl/codec_map.go | 14 +-
 .../protobuf/internal/impl/codec_map_go111.go | 38 -
 .../protobuf/internal/impl/codec_map_go112.go | 12 -
 .../protobuf/internal/impl/codec_message.go | 23 +-
 .../internal/impl/codec_message_opaque.go | 153 +
 .../protobuf/internal/impl/codec_reflect.go | 210 -
 .../protobuf/internal/impl/codec_unsafe.go | 3 -
 .../protobuf/internal/impl/convert.go | 2 +-
 .../protobuf/internal/impl/convert_map.go | 2 +-
 .../protobuf/internal/impl/decode.go | 56 +-
 .../protobuf/internal/impl/encode.go | 80 +-
 .../protobuf/internal/impl/equal.go | 224 +
 .../protobuf/internal/impl/lazy.go | 433 +
 .../internal/impl/legacy_extension.go | 1 +
 .../protobuf/internal/impl/legacy_message.go | 5 +-
 .../protobuf/internal/impl/merge.go | 27 +
 .../protobuf/internal/impl/message.go | 35 +-
 .../protobuf/internal/impl/message_opaque.go | 627 +
 .../internal/impl/message_opaque_gen.go | 132 +
 .../protobuf/internal/impl/message_reflect.go | 10 +-
 .../internal/impl/message_reflect_field.go | 160 +-
 .../impl/message_reflect_field_gen.go | 273 +
 .../protobuf/internal/impl/pointer_reflect.go | 215 -
 .../protobuf/internal/impl/pointer_unsafe.go | 15 +-
 .../internal/impl/pointer_unsafe_opaque.go | 42 +
.../protobuf/internal/impl/presence.go | 142 + .../protobuf/internal/impl/validate.go | 40 +- .../protobuf/internal/impl/weak.go | 74 - .../internal/protolazy/bufferreader.go | 364 + .../protobuf/internal/protolazy/lazy.go | 359 + .../internal/protolazy/pointer_unsafe.go | 17 + .../protobuf/internal/strs/strings_pure.go | 28 - ...ings_unsafe_go121.go => strings_unsafe.go} | 3 - .../internal/strs/strings_unsafe_go120.go | 95 - .../protobuf/internal/version/version.go | 4 +- .../protobuf/proto/decode.go | 21 +- .../protobuf/proto/encode.go | 3 +- .../google.golang.org/protobuf/proto/equal.go | 9 + .../protobuf/proto/extension.go | 71 + .../google.golang.org/protobuf/proto/merge.go | 6 + .../google.golang.org/protobuf/proto/size.go | 8 + .../protobuf/proto/wrapperopaque.go | 80 + .../protobuf/reflect/protodesc/desc.go | 20 +- .../protobuf/reflect/protodesc/desc_init.go | 5 +- .../reflect/protodesc/desc_resolve.go | 26 +- .../reflect/protodesc/desc_validate.go | 12 - .../protobuf/reflect/protodesc/editions.go | 46 +- .../protobuf/reflect/protodesc/proto.go | 3 - .../protobuf/reflect/protoreflect/methods.go | 10 + .../reflect/protoreflect/source_gen.go | 2 + .../protobuf/reflect/protoreflect/type.go | 12 +- .../protobuf/reflect/protoreflect/value.go | 2 +- .../reflect/protoreflect/value_pure.go | 60 - ...{value_unsafe_go121.go => value_unsafe.go} | 3 - .../protoreflect/value_unsafe_go120.go | 99 - .../protobuf/runtime/protoiface/methods.go | 34 + .../protobuf/runtime/protoimpl/impl.go | 4 + .../types/descriptorpb/descriptor.pb.go | 2510 +-- .../types/gofeaturespb/go_features.pb.go | 260 +- .../protobuf/types/known/anypb/any.pb.go | 65 +- .../types/known/durationpb/duration.pb.go | 66 +- .../types/known/timestamppb/timestamp.pb.go | 66 +- vendor/modules.txt | 268 +- 1258 files changed, 61410 insertions(+), 104231 deletions(-) create mode 100644 vendor/dario.cat/mergo/FUNDING.json create mode 100644 vendor/github.com/Microsoft/hcsshim/.clang-format create mode 100644 vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles rename vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/{cim_mount.go => cimfs.go} (70%) create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go delete mode 100644 vendor/github.com/asaskevich/govalidator/.gitignore delete mode 100644 vendor/github.com/asaskevich/govalidator/.travis.yml delete mode 100644 
 delete mode 100644 vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/asaskevich/govalidator/LICENSE
 delete mode 100644 vendor/github.com/asaskevich/govalidator/README.md
 delete mode 100644 vendor/github.com/asaskevich/govalidator/arrays.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/converter.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/doc.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/error.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/numerics.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/patterns.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/types.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/utils.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/validator.go
 delete mode 100644 vendor/github.com/asaskevich/govalidator/wercker.yml
 create mode 100644 vendor/github.com/containerd/errdefs/pkg/errhttp/http.go
 create mode 100644 vendor/github.com/containerd/typeurl/v2/types_gogo.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/reader.go
 delete mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_dest.go
 delete mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_src.go
 delete mode 100644 vendor/github.com/containers/image/v5/ostree/ostree_transport.go
 delete mode 100644 vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/internal/rekor_api_types.go
 delete mode 100644 vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/pki_cert.go
 delete mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/ostree.go
 delete mode 100644 vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/btrfs/version_none.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
 create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup.go
 create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup_linux.go
 create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock.go
 create mode 100644 vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_unix.go
 create mode 100644 vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_windows.go
 create mode 100644 vendor/github.com/containers/storage/internal/staging_lockfile/staging_lockfile.go
 create mode 100644 vendor/github.com/containers/storage/internal/tempdir/tempdir.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go
 rename vendor/github.com/containers/storage/pkg/chunked/internal/{ => minimal}/compression.go (94%)
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_netbsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
 create mode 100644 vendor/github.com/docker/docker/api/types/build/build.go
 create mode 100644 vendor/github.com/docker/docker/api/types/build/cache.go
 create mode 100644 vendor/github.com/docker/docker/api/types/build/disk_usage.go
 rename vendor/github.com/docker/docker/api/types/{ => common}/id_response.go (87%)
 create mode 100644 vendor/github.com/docker/docker/api/types/container/commit.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go
 create mode 100644 vendor/github.com/docker/docker/api/types/container/disk_usage.go
 create mode 100644 vendor/github.com/docker/docker/api/types/container/health.go
 create mode 100644 vendor/github.com/docker/docker/api/types/container/network_settings.go
 rename vendor/github.com/docker/docker/api/types/{ => container}/port.go (96%)
 create mode 100644 vendor/github.com/docker/docker/api/types/container/state.go
 create mode 100644 vendor/github.com/docker/docker/api/types/container/top_response.go
 create mode 100644 vendor/github.com/docker/docker/api/types/container/update_response.go
 create mode 100644 vendor/github.com/docker/docker/api/types/image/disk_usage.go
 create mode 100644 vendor/github.com/docker/docker/api/types/image/image_inspect.go
 rename vendor/github.com/docker/docker/api/types/{graph_driver_data.go => storage/driver_data.go} (75%)
 create mode 100644 vendor/github.com/docker/docker/api/types/system/disk_usage.go
 create mode 100644 vendor/github.com/docker/docker/api/types/volume/disk_usage.go
 rename vendor/github.com/docker/docker/client/{interface_experimental.go => checkpoint.go} (72%)
 rename vendor/github.com/docker/docker/client/{interface.go => client_interfaces.go} (77%)
 create mode 100644 vendor/github.com/docker/docker/client/image_history_opts.go
 create mode 100644 vendor/github.com/docker/docker/client/image_inspect_opts.go
 create mode 100644 vendor/github.com/docker/docker/client/image_load_opts.go
 create mode 100644 vendor/github.com/docker/docker/client/image_save_opts.go
 delete mode 100644 vendor/github.com/docker/docker/client/interface_stable.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/defs.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/doc.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/is.go
 create mode 100644 vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/.codecov.yml
 delete mode 100644 vendor/github.com/go-openapi/analysis/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/analysis/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/analysis/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/analysis/README.md
 delete mode 100644 vendor/github.com/go-openapi/analysis/analyzer.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/debug.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/doc.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/fixer.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/flatten.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_name.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_options.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/debug/debug.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/mixin.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/schema.go
 delete mode 100644 vendor/github.com/go-openapi/errors/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/errors/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/errors/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/errors/README.md
 delete mode 100644 vendor/github.com/go-openapi/errors/api.go
 delete mode 100644 vendor/github.com/go-openapi/errors/auth.go
 delete mode 100644 vendor/github.com/go-openapi/errors/doc.go
 delete mode 100644 vendor/github.com/go-openapi/errors/headers.go
 delete mode 100644 vendor/github.com/go-openapi/errors/middleware.go
 delete mode 100644 vendor/github.com/go-openapi/errors/parsing.go
 delete mode 100644 vendor/github.com/go-openapi/errors/schema.go
 delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md
 delete mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go
 delete mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/jsonreference/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/jsonreference/README.md
 delete mode 100644 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
 delete mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go
 delete mode 100644 vendor/github.com/go-openapi/loads/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/loads/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/loads/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/loads/.travis.yml
 delete mode 100644 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/loads/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/loads/README.md
 delete mode 100644 vendor/github.com/go-openapi/loads/doc.go
 delete mode 100644 vendor/github.com/go-openapi/loads/loaders.go
 delete mode 100644 vendor/github.com/go-openapi/loads/options.go
 delete mode 100644 vendor/github.com/go-openapi/loads/spec.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/runtime/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/runtime/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/runtime/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/runtime/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/runtime/README.md
 delete mode 100644 vendor/github.com/go-openapi/runtime/bytestream.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_auth_info.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_operation.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_request.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_response.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/constants.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/csv.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/csv_options.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/discard.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/file.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/headers.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/interfaces.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/json.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/request.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/statuses.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/text.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/values.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/xml.go
 delete mode 100644 vendor/github.com/go-openapi/spec/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/spec/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/spec/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/spec/README.md
 delete mode 100644 vendor/github.com/go-openapi/spec/cache.go
 delete mode 100644 vendor/github.com/go-openapi/spec/contact_info.go
 delete mode 100644 vendor/github.com/go-openapi/spec/debug.go
 delete mode 100644 vendor/github.com/go-openapi/spec/embed.go
 delete mode 100644 vendor/github.com/go-openapi/spec/errors.go
 delete mode 100644 vendor/github.com/go-openapi/spec/expander.go
 delete mode 100644 vendor/github.com/go-openapi/spec/external_docs.go
 delete mode 100644 vendor/github.com/go-openapi/spec/header.go
 delete mode 100644 vendor/github.com/go-openapi/spec/info.go
 delete mode 100644 vendor/github.com/go-openapi/spec/items.go
 delete mode 100644 vendor/github.com/go-openapi/spec/license.go
 delete mode 100644 vendor/github.com/go-openapi/spec/normalizer.go
 delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
 delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_windows.go
 delete mode 100644 vendor/github.com/go-openapi/spec/operation.go
 delete mode 100644 vendor/github.com/go-openapi/spec/parameter.go
 delete mode 100644 vendor/github.com/go-openapi/spec/path_item.go
 delete mode 100644 vendor/github.com/go-openapi/spec/paths.go
 delete mode 100644 vendor/github.com/go-openapi/spec/properties.go
 delete mode 100644 vendor/github.com/go-openapi/spec/ref.go
 delete mode 100644 vendor/github.com/go-openapi/spec/resolver.go
 delete mode 100644 vendor/github.com/go-openapi/spec/response.go
 delete mode 100644 vendor/github.com/go-openapi/spec/responses.go
 delete mode 100644 vendor/github.com/go-openapi/spec/schema.go
 delete mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go
 delete mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
 delete mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json
 delete mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go
 delete mode 100644 vendor/github.com/go-openapi/spec/spec.go
 delete mode 100644 vendor/github.com/go-openapi/spec/swagger.go
 delete mode 100644 vendor/github.com/go-openapi/spec/tag.go
 delete mode 100644 vendor/github.com/go-openapi/spec/url_go19.go
 delete mode 100644 vendor/github.com/go-openapi/spec/validations.go
 delete mode 100644 vendor/github.com/go-openapi/spec/xml_object.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/strfmt/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/strfmt/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/strfmt/README.md
 delete mode 100644 vendor/github.com/go-openapi/strfmt/bson.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/date.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/default.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/doc.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/duration.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/format.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/time.go
 delete mode 100644 vendor/github.com/go-openapi/strfmt/ulid.go
 delete mode 100644 vendor/github.com/go-openapi/swag/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/swag/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/swag/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/swag/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/swag/BENCHMARK.md
 delete mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/swag/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/swag/README.md
 delete mode 100644 vendor/github.com/go-openapi/swag/convert.go
 delete mode 100644 vendor/github.com/go-openapi/swag/convert_types.go
 delete mode 100644 vendor/github.com/go-openapi/swag/doc.go
 delete mode 100644 vendor/github.com/go-openapi/swag/file.go
 delete mode 100644 vendor/github.com/go-openapi/swag/initialism_index.go
 delete mode 100644 vendor/github.com/go-openapi/swag/json.go
 delete mode 100644 vendor/github.com/go-openapi/swag/loading.go
 delete mode 100644 vendor/github.com/go-openapi/swag/name_lexem.go
 delete mode 100644 vendor/github.com/go-openapi/swag/net.go
 delete mode 100644 vendor/github.com/go-openapi/swag/path.go
 delete mode 100644 vendor/github.com/go-openapi/swag/split.go
 delete mode 100644 vendor/github.com/go-openapi/swag/string_bytes.go
 delete mode 100644 vendor/github.com/go-openapi/swag/util.go
 delete mode 100644 vendor/github.com/go-openapi/swag/yaml.go
 delete mode 100644 vendor/github.com/go-openapi/validate/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/validate/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/validate/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/validate/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/validate/BENCHMARK.md
 delete mode 100644 vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/validate/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/validate/README.md
 delete mode 100644 vendor/github.com/go-openapi/validate/context.go
 delete mode 100644 vendor/github.com/go-openapi/validate/debug.go
 delete mode 100644 vendor/github.com/go-openapi/validate/default_validator.go
 delete mode 100644 vendor/github.com/go-openapi/validate/doc.go
 delete mode 100644 vendor/github.com/go-openapi/validate/example_validator.go
 delete mode 100644 vendor/github.com/go-openapi/validate/formats.go
 delete mode 100644 vendor/github.com/go-openapi/validate/helpers.go
 delete mode 100644 vendor/github.com/go-openapi/validate/object_validator.go
 delete mode 100644 vendor/github.com/go-openapi/validate/options.go
 delete mode 100644 vendor/github.com/go-openapi/validate/pools.go
 delete mode 100644 vendor/github.com/go-openapi/validate/pools_debug.go
 delete mode 100644 vendor/github.com/go-openapi/validate/result.go
 delete mode 100644 vendor/github.com/go-openapi/validate/rexp.go
 delete mode 100644 vendor/github.com/go-openapi/validate/schema.go
 delete mode 100644 vendor/github.com/go-openapi/validate/schema_messages.go
 delete mode 100644 vendor/github.com/go-openapi/validate/schema_option.go
 delete mode 100644 vendor/github.com/go-openapi/validate/schema_props.go
 delete mode 100644 vendor/github.com/go-openapi/validate/slice_validator.go
 delete mode 100644 vendor/github.com/go-openapi/validate/spec.go
 delete mode 100644 vendor/github.com/go-openapi/validate/spec_messages.go
 delete mode 100644 vendor/github.com/go-openapi/validate/type.go
 delete mode 100644 vendor/github.com/go-openapi/validate/update-fixtures.sh
 delete mode 100644 vendor/github.com/go-openapi/validate/validator.go
 delete mode 100644 vendor/github.com/go-openapi/validate/values.go
 delete mode 100644 vendor/github.com/josharian/intern/README.md
 delete mode 100644 vendor/github.com/josharian/intern/intern.go
 delete mode 100644 vendor/github.com/josharian/intern/license.md
 delete mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/internal/le/le.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
 delete mode 100644 vendor/github.com/mailru/easyjson/LICENSE
 delete mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go
 delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
 delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
 delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go
 delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go
 delete mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go
 delete mode 100644 vendor/github.com/moby/sys/capability/.codespellrc
 delete mode 100644 vendor/github.com/moby/sys/capability/.golangci.yml
 create mode 100644 vendor/github.com/moby/sys/user/idtools.go
 create mode 100644 vendor/github.com/moby/sys/user/idtools_unix.go
 create mode 100644 vendor/github.com/moby/sys/user/idtools_windows.go
 delete mode 100644 vendor/github.com/oklog/ulid/.gitignore
 delete mode 100644 vendor/github.com/oklog/ulid/.travis.yml
 delete mode 100644 vendor/github.com/oklog/ulid/AUTHORS.md
 delete mode 100644 vendor/github.com/oklog/ulid/CHANGELOG.md
 delete mode 100644 vendor/github.com/oklog/ulid/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.lock
 delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.toml
 delete mode 100644 vendor/github.com/oklog/ulid/README.md
 delete mode 100644 vendor/github.com/oklog/ulid/ulid.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/LICENSE
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go
 delete mode 100644 vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go
 delete mode 100644 vendor/github.com/proglottis/gpgme/callbacks.go
 rename vendor/github.com/sigstore/{rekor => protobuf-specs}/COPYRIGHT.txt (93%)
 rename vendor/github.com/{go-openapi/analysis => sigstore/protobuf-specs}/LICENSE (100%)
 create mode 100644 vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go
 delete mode 100644 vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
 delete mode 100644 vendor/github.com/sigstore/rekor/LICENSE
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/error.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go
 delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
 create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/.gitignore (88%)
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/LICENSE (100%)
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/Makefile (100%)
 create mode 100644 vendor/github.com/smallstep/pkcs7/README.md
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/ber.go (91%)
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/decrypt.go (62%)
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/encrypt.go (74%)
 create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go
 create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go
 create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go
 create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go
 create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go
 create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go
 create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/pkcs7.go (70%)
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/sign.go (100%)
 rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/verify.go (87%)
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/LICENSE
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/decoder.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/doc.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/encoder.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/marshal.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_element.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_value.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/registry.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/types.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go
 delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
 delete mode 100644 vendor/go.mozilla.org/pkcs7/README.md
 delete mode 100644 vendor/go.mozilla.org/pkcs7/verify_test_dsa.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
 rename vendor/{github.com/oklog/ulid => go.opentelemetry.io/auto/sdk}/LICENSE (100%)
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/doc.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/limit.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/span.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer.go
 create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
 delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
 delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
 create mode 100644 vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/noop.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/doc.go (96%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/exception.go (98%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/metric.go (77%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/schema.go (85%)
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/auto.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/noop.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh
 create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go
 create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go
 rename vendor/golang.org/x/crypto/internal/poly1305/{sum_amd64.go => sum_asm.go} (94%)
 create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s
 delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go
 delete mode 100644 vendor/golang.org/x/exp/LICENSE
 delete mode 100644 vendor/golang.org/x/exp/PATENTS
 delete mode 100644 vendor/golang.org/x/exp/maps/maps.go
 create mode 100644 vendor/golang.org/x/net/http2/config.go
 create mode 100644 vendor/golang.org/x/net/http2/config_go124.go
 create mode 100644 vendor/golang.org/x/net/http2/config_pre_go124.go
 create mode 100644 vendor/golang.org/x/net/http2/unencrypted.go
 create mode 100644 vendor/golang.org/x/net/internal/httpcommon/ascii.go
 rename vendor/golang.org/x/net/{http2 => internal/httpcommon}/headermap.go (74%)
 create mode 100644 vendor/golang.org/x/net/internal/httpcommon/request.go
 rename vendor/{github.com/go-openapi/errors => google.golang.org/genproto/googleapis/api}/LICENSE (100%)
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
 rename vendor/google.golang.org/grpc/{internal/grpcsync/oncefunc.go => balancer/pickfirst/internal/internal.go} (55%)
 create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/subconn.go
 create mode 100644 vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/client_stream.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/server_stream.go
 create mode 100644 vendor/google.golang.org/grpc/stats/metrics.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/genid/name.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/equal.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/lazy.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/presence.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
 rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe_go121.go => strings_unsafe.go} (96%)
 delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
 create mode 100644 vendor/google.golang.org/protobuf/proto/wrapperopaque.go
 delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
 rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe_go121.go => value_unsafe.go} (96%)
 delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go

diff --git a/go.mod b/go.mod
index 6b45a77f..33eac18d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,12 +1,12 @@
 module github.com/crazy-max/undock
 
-go 1.23.0
+go 1.23.3
 
 require (
 	github.com/alecthomas/kong v1.10.0
 	github.com/containerd/platforms v0.2.1
-	github.com/containers/image/v5 v5.33.1
-	github.com/docker/docker v27.3.1+incompatible
+	github.com/containers/image/v5 v5.36.0
+	github.com/docker/docker v28.3.2+incompatible
 	github.com/mholt/archives v0.1.1
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.1
@@ -14,122 +14,107 @@ require (
 	github.com/rs/zerolog v1.34.0
 	github.com/sirupsen/logrus v1.9.3
 	github.com/stretchr/testify v1.10.0
-	golang.org/x/sync v0.13.0
-	golang.org/x/sys v0.32.0
+	golang.org/x/sync v0.16.0
+	golang.org/x/sys v0.34.0
 )
 
 require (
-	dario.cat/mergo v1.0.1 // indirect
-	github.com/BurntSushi/toml v1.4.0 // indirect
+	dario.cat/mergo v1.0.2 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/BurntSushi/toml v1.5.0 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
-	github.com/Microsoft/hcsshim v0.12.9 // indirect
+	github.com/Microsoft/hcsshim v0.13.0 // indirect
 	github.com/STARRY-S/zip v0.2.1 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/andybalholm/brotli v1.1.1 // indirect
-	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/bodgit/plumbing v1.3.0 // indirect
 	github.com/bodgit/sevenzip v1.6.0 // indirect
 	github.com/bodgit/windows v1.0.1 // indirect
-	github.com/containerd/cgroups/v3 v3.0.3 // indirect
-	github.com/containerd/errdefs v0.3.0 // indirect
+	github.com/containerd/cgroups/v3 v3.0.5 // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
-	github.com/containerd/typeurl/v2 v2.2.0 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
+	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.2.0 // indirect
-	github.com/containers/storage v1.56.1 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
-	github.com/cyphar/filepath-securejoin v0.3.4 // indirect
+	github.com/containers/ocicrypt v1.2.1 // indirect
+	github.com/containers/storage v1.59.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
+	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.8.2 // indirect
+	github.com/docker/docker-credential-helpers v0.9.3 // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/go-jose/go-jose/v4 v4.0.4 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-openapi/analysis v0.23.0 // indirect
-	github.com/go-openapi/errors v0.22.0 // indirect
-	github.com/go-openapi/jsonpointer v0.21.0 // indirect
-	github.com/go-openapi/jsonreference v0.21.0 // indirect
-	github.com/go-openapi/loads v0.22.0 // indirect
-	github.com/go-openapi/runtime v0.28.0 // indirect
-	github.com/go-openapi/spec v0.21.0 // indirect
-	github.com/go-openapi/strfmt v0.23.0 // indirect
-	github.com/go-openapi/swag v0.23.0 // indirect
-	github.com/go-openapi/validate v0.24.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/go-containerregistry v0.20.2 // indirect
+	github.com/google/go-containerregistry v0.20.3 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
-	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
-	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
-	github.com/mattn/go-sqlite3 v1.14.24 // indirect
+	github.com/mattn/go-sqlite3 v1.14.28 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/minio/minlz v1.0.0 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
-	github.com/moby/sys/capability v0.3.0 // indirect
+	github.com/moby/sys/capability v0.4.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.2 // indirect
-	github.com/moby/sys/user v0.3.0 // indirect
+	github.com/moby/sys/user v0.4.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/nwaples/rardecode/v2 v2.1.0 // indirect
-	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/opencontainers/runtime-spec v1.2.0 // indirect
-	github.com/opencontainers/selinux v1.11.1 // indirect
-	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
+	github.com/opencontainers/runtime-spec v1.2.1 // indirect
+	github.com/opencontainers/selinux v1.12.0 // indirect
 	github.com/pierrec/lz4/v4 v4.1.21 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/proglottis/gpgme v0.1.3 // indirect
+	github.com/proglottis/gpgme v0.1.4 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
-	github.com/sigstore/fulcio v1.6.4 // indirect
-	github.com/sigstore/rekor v1.3.6 // indirect
-	github.com/sigstore/sigstore v1.8.9 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
+	github.com/sigstore/fulcio v1.6.6 // indirect
+	github.com/sigstore/protobuf-specs v0.4.1 // indirect
+	github.com/sigstore/sigstore v1.9.5 // indirect
+	github.com/smallstep/pkcs7 v0.1.1 // indirect
 	github.com/sorairolake/lzip-go v0.3.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
-	github.com/sylabs/sif/v2 v2.19.1 // indirect
-	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
+	github.com/sylabs/sif/v2 v2.21.1 // indirect
+	github.com/tchap/go-patricia/v2 v2.3.3 // indirect
 	github.com/therootcompany/xz v1.0.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.12 // indirect
-	github.com/vbatts/tar-split v0.11.6 // indirect
-	github.com/vbauerster/mpb/v8 v8.8.3 // indirect
-	go.mongodb.org/mongo-driver v1.14.0 // indirect
-	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
+	github.com/vbatts/tar-split v0.12.1 // indirect
+	github.com/vbauerster/mpb/v8 v8.10.2 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel v1.28.0 // indirect
-	go.opentelemetry.io/otel/metric v1.28.0 // indirect
-	go.opentelemetry.io/otel/trace v1.28.0 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
+	go.opentelemetry.io/otel v1.35.0 // indirect
+	go.opentelemetry.io/otel/metric v1.35.0 // indirect
+	go.opentelemetry.io/otel/trace v1.35.0 // indirect
 	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
-	golang.org/x/crypto v0.31.0 // indirect
-	golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
-	golang.org/x/net v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	google.golang.org/grpc v1.67.0 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	golang.org/x/crypto v0.40.0 // indirect
+	golang.org/x/net v0.41.0 // indirect
+	golang.org/x/term v0.33.0 // indirect
+	golang.org/x/text v0.27.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
+	google.golang.org/grpc v1.72.2 // indirect
+	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

diff --git a/go.sum b/go.sum
index 3525ebbf..46ca101f 100644
--- a/go.sum
+++ b/go.sum
@@ -14,21 +14,19 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
-dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= -github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= -github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= +github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA= +github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok= github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= @@ -43,8 +41,6 @@ github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= @@ -63,47 +59,47 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups/v3 v3.0.3 
h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= -github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= -github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= -github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= -github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= -github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= -github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= -github.com/containers/image/v5 v5.33.1 h1:nTWKwxAlY0aJrilvvhssqssJVnley6VqxkLiLzTEYIs= -github.com/containers/image/v5 v5.33.1/go.mod h1:/FJiLlvVbeBxWNMPVPPIWJxHTAzwBoFvyN0a51zo1CE= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= +github.com/containers/image/v5 v5.36.0 h1:Zh+xFcLjRmicnOT5AFPHH/xj+e3s9ojDN/9X2Kx1+Jo= +github.com/containers/image/v5 v5.36.0/go.mod h1:VZ6cyDHbxZoOt4dklUJ+WNEH9FrgSgfH3qUBYKFlcT0= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM= -github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= -github.com/containers/storage v1.56.1 h1:gDZj/S6Zxus4Xx42X6iNB3ODXuh0qoOdH/BABfrvcKo= -github.com/containers/storage v1.56.1/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk= +github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= +github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= +github.com/containers/storage v1.59.0 h1:r2pYSTzQpJTROZbjJQ54Z0GT+rUC6+wHzlSY8yPjsXk= +github.com/containers/storage v1.59.0/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f 
h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= -github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= -github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= -github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS3hP2huFsY= +github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.3.2+incompatible h1:wn66NJ6pWB1vBZIilP8G3qQPqHy5XymfYn5vsqeA5oA= +github.com/docker/docker v28.3.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= @@ -121,33 +117,13 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 
v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= -github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= -github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= -github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= -github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= -github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= -github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= -github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= -github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= -github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= -github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -157,8 +133,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= 
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -185,10 +161,11 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= -github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -205,8 +182,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -221,8 +198,6 @@ github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSo github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod 
h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -230,8 +205,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -244,8 +219,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -253,8 +226,8 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mholt/archives v0.1.1 h1:c7J3qXN1FB54y0qiUXiq9Bxk4eCUc8pdXWwOhZdRzeY= github.com/mholt/archives v0.1.1/go.mod h1:FQVz01Q2uXKB/35CXeW/QFO23xT+hSCGZHVtha78U4I= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= @@ -263,18 +236,20 @@ github.com/minio/minlz v1.0.0 h1:Kj7aJZ1//LlTP1DM8Jm7lNKvvJS2m74gyyXXn3+uJWQ= github.com/minio/minlz v1.0.0/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= 
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg= -github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk= +github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -286,18 +261,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nwaples/rardecode/v2 v2.1.0 h1:JQl9ZoBPDy+nIZGb1mx8+anfHp/LV3NE2MjMiv0ct/U= github.com/nwaples/rardecode/v2 v2.1.0/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= 
-github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= -github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8= +github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -305,41 +276,45 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= -github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= -github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY= -github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal 
v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY= github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY= -github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs= -github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= -github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= -github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= +github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= +github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= +github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= +github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= +github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg= github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= @@ -355,10 +330,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0= -github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E= -github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= -github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/sylabs/sif/v2 v2.21.1 h1:GZ0b5//AFAqJEChd8wHV/uSKx/l1iuGYwjR8nx+4wPI= +github.com/sylabs/sif/v2 v2.21.1/go.mod h1:YoqEGQnb5x/ItV653bawXHZJOXQaEWpGwHsSD3YePJI= +github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc= +github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= @@ -366,47 +341,41 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHT github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= -github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= -github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ= -github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= +github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= -go.mongodb.org/mongo-driver v1.14.0/go.mod 
h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= -go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod 
h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -415,8 +384,12 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -425,8 +398,6 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= -golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -446,6 +417,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB 
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -464,9 +439,14 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -480,8 +460,13 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync 
v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -497,20 +482,31 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -518,12 +514,17 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -551,6 +552,9 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -582,11 +586,10 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 
h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -596,8 +599,8 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= -google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -607,8 +610,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -618,10 +621,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
-gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
+gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/dario.cat/mergo/FUNDING.json b/vendor/dario.cat/mergo/FUNDING.json
new file mode 100644
index 00000000..0585e1fe
--- /dev/null
+++ b/vendor/dario.cat/mergo/FUNDING.json
@@ -0,0 +1,7 @@
+{
+  "drips": {
+    "ethereum": {
+      "ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930"
+    }
+  }
+}
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
index 0b3c4888..0e4a59af 100644
--- a/vendor/dario.cat/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -85,7 +85,6 @@ Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/depend
 * [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
 * [go-micro/go-micro](https://github.com/go-micro/go-micro)
 * [grafana/loki](https://github.com/grafana/loki)
-* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
 * [masterminds/sprig](github.com/Masterminds/sprig)
 * [moby/moby](https://github.com/moby/moby)
 * [slackhq/nebula](https://github.com/slackhq/nebula)
@@ -191,10 +190,6 @@ func main() {
 }
 ```
 
-Note: if test are failing due missing package, please execute:
-
-    go get gopkg.in/yaml.v3
-
 ### Transformers
 
 Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md
index a5de61f7..3788fcc1 100644
--- a/vendor/dario.cat/mergo/SECURITY.md
+++ b/vendor/dario.cat/mergo/SECURITY.md
@@ -4,8 +4,8 @@
 
 | Version | Supported          |
 | ------- | ------------------ |
-| 0.3.x   | :white_check_mark: |
-| < 0.3   | :x:                |
+| 1.x.x   | :white_check_mark: |
+| < 1.0   | :x:                |
 
 ## Security contact information
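The Transformers section of the mergo README quoted above ends on the question of how to merge a non-zero `time.Time`. As a reviewer aid only (not part of this patch), here is a minimal sketch of the documented answer using mergo's `WithTransformers` option; the `timeTransformer` and `Snapshot` names are illustrative:

```go
package main

import (
	"fmt"
	"reflect"
	"time"

	"dario.cat/mergo"
)

// timeTransformer customizes how mergo merges time.Time values:
// the destination is only overwritten while it is the zero time.
type timeTransformer struct{}

func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ == reflect.TypeOf(time.Time{}) {
		return func(dst, src reflect.Value) error {
			if dst.CanSet() && dst.Interface().(time.Time).IsZero() {
				dst.Set(src)
			}
			return nil
		}
	}
	return nil // every other type keeps mergo's default merge behavior
}

type Snapshot struct {
	Time time.Time
}

func main() {
	src := Snapshot{Time: time.Now()}
	var dst Snapshot // zero time.Time, so the transformer lets src win

	if err := mergo.Merge(&dst, src, mergo.WithTransformers(timeTransformer{})); err != nil {
		panic(err)
	}
	fmt.Println(dst.Time) // prints src's timestamp
}
```

Returning `nil` from `Transformer` for unhandled types is what lets the transformer compose with mergo's default behavior instead of replacing it.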
`git show diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 7aaf462c..3fa516ca 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { return md.unify(primValue.undecoded, rvalue(v)) } +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // @@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error { if err != nil { return md.parseErr(err) } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. + if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { @@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error { func (md *MetaData) parseErr(err error) error { k := md.context.String() + d := string(md.data) return ParseError{ + Message: err.Error(), + err: err, LastKey: k, - Position: md.keyInfo[k].pos, + Position: md.keyInfo[k].pos.withCol(d), Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), + input: d, } } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 73366c0d..ac196e7d 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. 
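The decode.go hunk above teaches MetaData to mark every key under a table as decoded once a custom toml.Unmarshaler has consumed it, so MetaData.Undecoded() no longer reports false positives for those keys. A minimal sketch of the resulting behavior (the rawTable type and the TOML snippet are illustrative, not part of this patch):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// rawTable soaks up an entire table via the Unmarshaler interface.
type rawTable struct{ fields map[string]interface{} }

func (r *rawTable) UnmarshalTOML(v interface{}) error {
	r.fields, _ = v.(map[string]interface{})
	return nil
}

func main() {
	var cfg struct {
		Server rawTable `toml:"server"`
	}
	md, err := toml.Decode("[server]\nhost = \"x\"\nport = 8080", &cfg)
	if err != nil {
		panic(err)
	}
	// With markDecodedRecursive, host and port are marked decoded, so this
	// prints an empty slice; before the fix they were reported as undecoded
	// even though UnmarshalTOML had consumed them.
	fmt.Println(md.Undecoded())
}
```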
- var mapKeysDirect, mapKeysSub []string + var mapKeysDirect, mapKeysSub []reflect.Value for _, mapKey := range rv.MapKeys() { - k := mapKey.String() if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) + mapKeysSub = append(mapKeysSub, mapKey) } else { - mapKeysDirect = append(mapKeysDirect, k) + mapKeysDirect = append(mapKeysDirect, mapKey) } } - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + val := eindirect(rv.MapIndex(mapKey)) if isNil(val) { continue } if inline { - enc.writeKeyValue(Key{mapKey}, val, true) + enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { enc.wf(", ") } } else { - enc.encode(key.add(mapKey), val) + enc.encode(key.add(mapKey.String()), val) } } } @@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } } -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - func pointerTo(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { return pointerTo(t.Elem()) @@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. @@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } addFields(rt, rv, nil) - writeFields := func(fields [][]int) { + writeFields := func(fields [][]int, totalFields int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) @@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { + if fieldIndex[0] != totalFields-1 { enc.wf(", ") } } else { @@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.wf("{") } - writeFields(fieldsDirect) - writeFields(fieldsSub) + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) if inline { enc.wf("}") } diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index b45a3f45..b7077d3a 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -67,21 +67,36 @@ type ParseError struct { // Position of an error. type Position struct { Line int // Line number, starting at 1. + Col int // Error column, starting at 1. 
Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. + Len int // Length of the error in bytes. } -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. + p.Col = 1 + } + break + } + pos += ll } + return p +} +func (pe ParseError) Error() string { if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) } return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) + pe.Position.Line, pe.LastKey, pe.Message) } // ErrorWithPosition returns the error with detailed location context. @@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string { return pe.Error() } + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + var ( lines = strings.Split(pe.input, "\n") - col = pe.column(lines) b = new(strings.Builder) ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? This may not show up - // well everywhere. - if pe.Position.Len == 1 { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) + pe.Message, pe.Position.Line, pe.Position.Col) } else { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) } if pe.Position.Line > 2 { fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) @@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string { diff := len(expanded) - len(lines[pe.Position.Line-1]) fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } @@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string { return m } -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. 
- col = 0 - } - break - } - pos += ll - } - - return col -} - func expandTab(s string) string { var ( b strings.Builder diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index a1016d98..1c3b4770 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() - pos.Line-- + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } pos.Len = 1 pos.Start = lx.pos - 1 lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} @@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn { lx.emit(itemKeyEnd) return lexSkip(lx, lexValue) default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } return lx.errorf("expected '.' or '=', but got %q instead", r) } } @@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn { if r == eof { return lx.errorf("unexpected EOF; expected value") } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } return lx.errorf("expected value but found %q instead", r) } @@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { case 'x': r = lx.peek() if !isHex(r) { - lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) } return lexHexInteger } @@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index e6145373..0d337026 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string { // Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { - if cap(k) > len(k) { - return append(k, piece) - } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 11ac3108..e3ea8a9a 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) { // it anyway. 
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] - //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) { if i := strings.IndexRune(data[:ex], 0); i > -1 { return nil, ParseError{ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, Line: 1, input: data, } @@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) { func (p *parser) panicErr(it item, err error) { panic(ParseError{ + Message: err.Error(), err: err, - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) { func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) { func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: p.pos, + Position: p.pos.withCol(p.lx.input), Line: p.pos.Line, LastKey: p.current(), }) @@ -123,10 +123,11 @@ func (p *parser) next() item { if it.typ == itemError { if it.err != nil { panic(ParseError{ - Position: it.pos, + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Line, LastKey: p.current(), - err: it.err, }) } @@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool { } } - // isHexis a superset of all the permissable characters surrounding an + // isHex is a superset of all the permissible characters surrounding an // underscore. 
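The error.go and parse.go hunks above fold the old private ParseError.column walk into Position.withCol, so a ParseError now carries a 1-based Col as soon as it is constructed instead of recomputing the column in ErrorWithPosition. A standalone sketch of the same offset-to-column conversion (not the library's exported API; assumes strings is imported):

```go
// byteOffsetToCol converts a 0-based byte offset into a 1-based column by
// walking the input line by line, mirroring Position.withCol.
func byteOffsetToCol(input string, start int) int {
	pos := 0
	for _, line := range strings.Split(input, "\n") {
		ll := len(line) + 1 // +1 for the newline strings.Split removed
		if pos+ll >= start {
			if col := start - pos + 1; col >= 1 {
				return col
			}
			return 1 // should never happen, but guard as the original does
		}
		pos += ll
	}
	return 1
}
```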
accept = isHex(r) } diff --git a/vendor/github.com/Microsoft/hcsshim/.clang-format b/vendor/github.com/Microsoft/hcsshim/.clang-format new file mode 100644 index 00000000..fd843ce3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.clang-format @@ -0,0 +1,12 @@ +Language: Cpp +BasedOnStyle: Microsoft +BreakBeforeBraces: Attach +PointerAlignment: Left +AllowShortFunctionsOnASingleLine: All +# match Go style +IndentCaseLabels: false +# don't break comments over line limit (needed for CodeQL exceptions) +ReflowComments: false +InsertNewlineAtEOF: true +KeepEmptyLines: + AtEndOfFile: true diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml index 7d38a2fb..113e6f07 100644 --- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -5,9 +5,6 @@ run: - admin - functional - integration - skip-dirs: - # paths are relative to module root - - cri-containerd/test-images linters: enable: @@ -34,13 +31,15 @@ linters-settings: # struct order is often for Win32 compat # also, ignore pointer bytes/GC issues for now until performance becomes an issue - fieldalignment - check-shadowing: true stylecheck: # https://staticcheck.io/docs/checks checks: ["all"] issues: + exclude-dirs: + # paths are relative to module root + - cri-containerd/test-images exclude-rules: # err is very often shadowed in nested scopes - linters: @@ -70,22 +69,22 @@ issues: - path: layer.go linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: hcsshim.go linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: cmd\\ncproxy\\nodenetsvc\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: cmd\\ncproxy_mock\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcs\\schema2\\ linters: @@ -95,67 +94,67 @@ issues: - path: internal\\wclayer\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: hcn\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcs\\schema1\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hns\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: ext4\\internal\\compactext4\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: ext4\\internal\\format\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\guestrequest\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\guest\\prot\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\windevice\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\winapi\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\vmcompute\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\regstate\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcserror\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" # v0 APIs are deprecated, but still retained for backwards compatability - path: cmd\\ncproxy\\ @@ -171,4 +170,4 @@ issues: - path: internal\\vhdx\\info linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index de643589..9a9f5b40 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -1,13 +1,20 @@ -BASE:=base.tar.gz -DEV_BUILD:=0 +include Makefile.bootfiles GO:=go 
GO_FLAGS:=-ldflags "-s -w" # strip Go binaries CGO_ENABLED:=0 GOMODVENDOR:= +KMOD:=0 CFLAGS:=-O2 -Wall -LDFLAGS:=-static -s # strip C binaries +LDFLAGS:=-static -s #strip C binaries +LDLIBS:= +PREPROCESSORFLAGS:= +ifeq "$(KMOD)" "1" +LDFLAGS:= -s +LDLIBS:= -lkmod +PREPROCESSORFLAGS:=-DMODULES=1 +endif GO_FLAGS_EXTRA:= ifeq "$(GOMODVENDOR)" "1" @@ -23,108 +30,14 @@ SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) # additional directories to search for rule prerequisites and targets VPATH=$(SRCROOT) -DELTA_TARGET=out/delta.tar.gz - -ifeq "$(DEV_BUILD)" "1" -DELTA_TARGET=out/delta-dev.tar.gz -endif - -ifeq "$(SNP_BUILD)" "1" -DELTA_TARGET=out/delta-snp.tar.gz -endif - # The link aliases for gcstools GCS_TOOLS=\ generichook \ install-drivers -# Common path prefix. -PATH_PREFIX:= -# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. $(PATH_PREFIX)/$(VMGS_TOOL) -VMGS_TOOL:= -IGVM_TOOL:= -KERNEL_PATH:= - -.PHONY: all always rootfs test snp simple - -.DEFAULT_GOAL := all - -all: out/initrd.img out/rootfs.tar.gz - -clean: - find -name '*.o' -print0 | xargs -0 -r rm - rm -rf bin deps rootfs out - test: cd $(SRCROOT) && $(GO) test -v ./internal/guest/... -rootfs: out/rootfs.vhd - -snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs - -simple: out/simple.vmgs snp - -%.vmgs: %.bin - rm -f $@ - # du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes - $(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc` - $(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8 - -# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk. -out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0 - -ROOTFS_DEVICE:=/dev/sda -VERITY_DEVICE:=/dev/sdb -# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.) -out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0 - -# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. 
-out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0 - -# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. -%.vhd: % bin/cmd/tar2ext4 - ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ - -# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4. -%.vhd: %.ext4 bin/cmd/tar2ext4 - ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ - -%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt - veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info - # Retrieve info required by dm-verity at boot time - # Get the blocksize of rootfs - cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest - cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt - cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize - cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize - cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks - echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors - -out/rootfs.hash.salt: - hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@ - -out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4 - gzip -f -d ./out/rootfs.tar.gz - ./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@ - -out/rootfs.tar.gz: out/initrd.img - rm -rf rootfs-conv - mkdir rootfs-conv - gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) - tar -zcf $@ -C rootfs-conv . - rm -rf rootfs-conv - -out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh - $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed - gzip -c out/initrd.img.uncompressed > $@ - rm out/initrd.img.uncompressed - # This target includes utilities which may be useful for testing purposes. out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report rm -rf rootfs-dev @@ -168,10 +81,7 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs . 
 	rm -rf rootfs
 
-out/containerd-shim-runhcs-v1.exe:
-	GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
-
-bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
 	@mkdir -p $(dir $@)
 	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
@@ -181,8 +91,8 @@ bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
 
 bin/init: init/init.o vsockexec/vsock.o
 	@mkdir -p bin
-	$(CC) $(LDFLAGS) -o $@ $^
+	$(CC) $(LDFLAGS) -o $@ $^ $(LDLIBS)
 
 %.o: %.c
 	@mkdir -p $(dir $@)
-	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
\ No newline at end of file
+	$(CC) $(PREPROCESSORFLAGS) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles b/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
new file mode 100644
index 00000000..e6f06d49
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
@@ -0,0 +1,197 @@
+BASE:=base.tar.gz
+DEV_BUILD:=0
+
+DELTA_TARGET=out/delta.tar.gz
+
+ifeq "$(DEV_BUILD)" "1"
+DELTA_TARGET=out/delta-dev.tar.gz
+endif
+
+ifeq "$(SNP_BUILD)" "1"
+DELTA_TARGET=out/delta-snp.tar.gz
+endif
+
+SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
+
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipes e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=
+TAR2EXT4_TOOL:=bin/cmd/tar2ext4
+
+ROOTFS_DEVICE:=/dev/sda
+HASH_DEVICE:=/dev/sdb
+
+.PHONY: all always rootfs test snp simple
+
+.DEFAULT_GOAL := all
+
+all: out/initrd.img out/rootfs.tar.gz
+
+clean:
+	find -name '*.o' -print0 | xargs -0 -r rm
+	rm -rf bin rootfs out
+
+rootfs: out/rootfs.vhd
+
+snp: out/kernel.vmgs out/rootfs-verity.vhd out/v2056.vmgs out/v2056combined.vmgs
+
+simple: out/simple.vmgs snp
+
+%.vmgs: %.bin
+	rm -f $@
+	# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+
+# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
+		-o $@ \
+		-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
+		-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" \
+		-rdinit out/initrd.img \
+		-vtl 0
+
+# The boot performance is optimized by supplying rootfs as a SCSI attachment. In this case the kernel boots with
+# dm-verity to ensure the integrity. Similar to layer VHDs the verity Merkle tree is appended to ext4 filesystem.
+# It transpires that the /dev/sd* order is not deterministic wrt the scsi device order. Thus build a single userland
+# fs + merkle tree device and boot that.
+#
+# From https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-init.html
+#
+# dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+#
+# where:
+# <name>          ::= The device name.
+# <uuid>          ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | ""
+# <minor>         ::= The device minor number | ""
+# <flags>         ::= "ro" | "rw"
+# <table>         ::= <start_sector> <num_sectors> <target_type> <target_args>
+# <target_type>   ::= "verity" | "linear" | ... (see list below)
+#
+# From https://docs.kernel.org/admin-guide/device-mapper/verity.html
+#
+# <version> <dev> <hash_dev>
+# <data_block_size> <hash_block_size>
+# <num_data_blocks> <hash_start_block>
+# <algorithm> <digest> <salt>
+# [<#opt_params> <opt_params>]
+#
+# typical igvm tool line once all the macros are expanded
+# python3 /home/user/igvmfile.py -o out/v2056.bin -kernel /home/user/bzImage -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+#
+# so a kernel command line of:
+# 8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh
+#
+# and a dm-mod.create of:
+# dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+#
+# which breaks down to:
+#
+# name = "dmverity"
+# uuid = ""
+# minor = ""
+# flags = "ro"
+# table = 0 196744 verity "args"
+# start_sector = 0
+# num_sectors = 196744
+# target_type = verity
+# target_args = 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+# args:
+# version 1
+# dev /dev/sda
+# hash_dev /dev/sdb
+# data_block_size 4096
+# hash_block_size 4096
+# num_data_blocks 24593
+# hash_start_block 0
+# algorithm sha256
+# digest 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66
+# salt b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba
+# opt_params
+# count = 1
+# ignore_corruption
+#
+# combined typical (note the bigger count of sectors for the whole device)
+# dmverity,,,ro,0 199672 verity 1 /dev/sda /dev/sda 4096 4096 24959 24959 sha256 4aa6e79866ee946ddbd9cddd6554bc6449272942fcc65934326817785a3bd374 adc4956274489c936395bab046a2d476f21ef436e571ba53da2fdf3aee59bf0a
+#
+# A few notes:
+# - num_sectors is the size of the final (aka target) verity device, i.e. the size of our rootfs excluding the Merkle
+#   tree.
+# - We don't add verity superblock, so the <hash_start_block> will be exactly at the end of ext4 filesystem and equal
+#   to its size. In the case when verity superblock is present an extra block should be added to the offset value,
+#   i.e. 24959 becomes 24960.
+
+
+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+# Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
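Those worked examples are mostly arithmetic; a hedged Go restatement of the sector math that the recipes below recompute with shell and awk (constants copied from the samples above):

```go
package main

import "fmt"

func main() {
	const (
		dataBlocks    = 24593 // <num_data_blocks> reported by veritysetup
		dataBlockSize = 4096  // <data_block_size> in bytes
	)
	// num_sectors in the dm table counts 512-byte sectors of the verity
	// target: the rootfs only, excluding the appended Merkle tree.
	fmt.Println(dataBlocks * dataBlockSize / 512) // 196744, as in the sample above
	// Two-device layout (hash tree on /dev/sdb, no superblock): the hash area
	// starts at block 0 of the hash device, hence the "... 24593 0 ..." sample.
	// Combined single-device layout: the tree is appended to the ext4 image,
	// so <hash_start_block> equals <num_data_blocks>.
	fmt.Println(dataBlocks) // hash_start_block for a combined device
}
```

The v2056 and kernel targets below interpolate exactly these quantities into the -append string.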
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(HASH_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \ + -vtl 0 + +out/v2056combined.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \ + -vtl 0 + +# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. +out/kernel.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup.sh + rm -f $@ + echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" \ + -vtl 0 + +# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. 
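The %.vhd pattern rules that follow shell out to $(TAR2EXT4_TOOL); as a rough library-level sketch of what that binary wraps, using the hcsshim ext4/tar2ext4 package (treat the option name as an assumption to verify; error handling on open/create is elided):

```go
package main

import (
	"os"

	"github.com/Microsoft/hcsshim/ext4/tar2ext4"
)

func main() {
	in, _ := os.Open("out/rootfs.tar")    // tar stream to convert
	out, _ := os.Create("out/rootfs.vhd") // ext4 image plus fixed VHD footer
	defer in.Close()
	defer out.Close()
	// Convert writes an ext4 filesystem from the tar entries; AppendVhdFooter
	// adds the footer that the CLI's -only-vhd mode also emits.
	if err := tar2ext4.Convert(in, out, tar2ext4.AppendVhdFooter); err != nil {
		panic(err)
	}
}
```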
+%.vhd: % $(TAR2EXT4_TOOL) + $(TAR2EXT4_TOOL) -only-vhd -i $< -o $@ + +# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4. +%.vhd: %.ext4 $(TAR2EXT4_TOOL) + $(TAR2EXT4_TOOL) -only-vhd -i $< -o $@ + +%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt + veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info + # Retrieve info required by dm-verity at boot time + # Get the blocksize of rootfs + cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest + cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt + cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize + cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize + cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks + echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors + +out/rootfs.hash.salt: + hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@ + +out/rootfs.ext4: out/rootfs.tar.gz $(TAR2EXT4_TOOL) + gzip -f -d ./out/rootfs.tar.gz + $(TAR2EXT4_TOOL) -i ./out/rootfs.tar -o $@ + +out/rootfs-verity.ext4: out/rootfs.ext4 out/rootfs.hash + cp out/rootfs.ext4 $@ + cat out/rootfs.hash >> $@ + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . + rm -rf rootfs-conv + +out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index 32043804..ae666826 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -44,7 +44,7 @@ delta.tar.gz initrd.img rootfs.tar.gz ### Containerd Shim -For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). +For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/main/core/runtime/v2/README.md). Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 8ef611d6..fef2bf54 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -63,10 +63,10 @@ func (process *Process) SystemID() string { } func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { //nolint:errorlint - case nil: + if err == nil { return true, nil - case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: + } + if errors.Is(err, ErrVmcomputeOperationInvalidState) || errors.Is(err, ErrComputeSystemDoesNotExist) || errors.Is(err, ErrElementNotFound) { if !process.stopped() { // The process should be gone, but we have not received the notification. 
// After a second, force unblock the process wait to work around a possible @@ -82,9 +82,8 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo }() } return false, nil - default: - return false, err } + return false, nil } // Signal signals the process with `options`. diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go index ca75277a..93857da6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go @@ -24,4 +24,6 @@ type Chipset struct { // LinuxKernelDirect - Added in v2.2 Builds >=181117 LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` + + FirmwareFile *FirmwareFile `json:"FirmwareFile,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go similarity index 70% rename from vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go index 81865e7e..52fb62a8 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go @@ -9,14 +9,6 @@ package hcsschema -const ( - CimMountFlagNone uint32 = 0x0 - CimMountFlagChildOnly uint32 = 0x1 - CimMountFlagEnableDax uint32 = 0x2 - CimMountFlagCacheFiles uint32 = 0x4 - CimMountFlagCacheRegions uint32 = 0x8 -) - type CimMount struct { ImagePath string `json:"ImagePath,omitempty"` FileSystemName string `json:"FileSystemName,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go new file mode 100644 index 00000000..c27a1320 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go @@ -0,0 +1,8 @@ +package hcsschema + +type FirmwareFile struct { + // Parameters is an experimental/pre-release field. The field itself or its + // behavior can change in future iterations of the schema. Avoid taking a hard + // dependency on this field. + Parameters []byte `json:"Parameters,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go deleted file mode 100644 index 71224c75..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Memory2 struct { - SizeInMB uint64 `json:"SizeInMB,omitempty"` - - AllowOvercommit bool `json:"AllowOvercommit,omitempty"` - - EnableHotHint bool `json:"EnableHotHint,omitempty"` - - EnableColdHint bool `json:"EnableColdHint,omitempty"` - - EnableEpf bool `json:"EnableEpf,omitempty"` - - // EnableDeferredCommit is private in the schema. If regenerated need to add back. 
- EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` - - // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed - // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by - // the guest operating system). - EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` - - // LowMmioGapInMB is the low MMIO region allocated below 4GB. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` - - // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and - // size). - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` - - // HighMmioGapInMB is the high MMIO region. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go new file mode 100644 index 00000000..41837416 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go @@ -0,0 +1,21 @@ +// Autogenerated code; DO NOT EDIT. + +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swaggerapi/swaggercodegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swaggerapi/swaggercodegen.git) + */ + +package hcsschema + +type MemoryBackingType string + +// List of MemoryBackingType +const ( + MemoryBackingType_PHYSICAL MemoryBackingType = "Physical" + MemoryBackingType_VIRTUAL MemoryBackingType = "Virtual" + MemoryBackingType_HYBRID MemoryBackingType = "Hybrid" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go new file mode 100644 index 00000000..70a13951 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go @@ -0,0 +1,19 @@ +// Autogenerated code; DO NOT EDIT. + +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Numa struct { + VirtualNodeCount uint8 `json:"VirtualNodeCount,omitempty"` + PreferredPhysicalNodes []int64 `json:"PreferredPhysicalNodes,omitempty"` + Settings []NumaSetting `json:"Settings,omitempty"` + MaxSizePerNode uint64 `json:"MaxSizePerNode,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go new file mode 100644 index 00000000..5984bdec --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go @@ -0,0 +1,17 @@ +// Autogenerated code; DO NOT EDIT. 
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNode struct {
+	VirtualNodeIndex uint32 `json:"VirtualNodeIndex,omitempty"`
+	PhysicalNodeIndex uint32 `json:"PhysicalNodeIndex,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
new file mode 100644
index 00000000..88567f0f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
@@ -0,0 +1,19 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeMemory struct {
+	// Total physical memory on this physical NUMA node that is consumable by the VMs.
+	TotalConsumableMemoryInPages uint64 `json:"TotalConsumableMemoryInPages,omitempty"`
+	// Currently available physical memory on this physical NUMA node for the VMs.
+	AvailableMemoryInPages uint64 `json:"AvailableMemoryInPages,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
new file mode 100644
index 00000000..4b6795bb
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
@@ -0,0 +1,17 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeProcessor struct {
+	TotalAssignedProcessors uint32 `json:"TotalAssignedProcessors,omitempty"`
+	TotalAvailableProcessors uint32 `json:"TotalAvailableProcessors,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
new file mode 100644
index 00000000..bc3fba37
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaProcessors struct {
+	CountPerNode Range `json:"count_per_node,omitempty"`
+	NodePerSocket uint32 `json:"node_per_socket,omitempty"`
+}
+
+type Range struct {
+	Max uint32 `json:"max,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
new file mode 100644
index 00000000..3f27b2ca
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+ +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NumaSetting struct { + VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"` + PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"` + VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"` + CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"` + CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"` + MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go deleted file mode 100644 index c64f335e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.5 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Processor2 struct { - Count int32 `json:"Count,omitempty"` - - Limit int32 `json:"Limit,omitempty"` - - Weight int32 `json:"Weight,omitempty"` - - ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` - - // An optional object that configures the CPU Group to which a Virtual Machine is going to bind to. - CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go index 0c7efe8d..d4cb95bd 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -26,6 +26,8 @@ type Properties struct { RuntimeId string `json:"RuntimeId,omitempty"` + SystemGUID string `json:"SystemGUID,omitempty"` + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` State string `json:"State,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go index 98f2c96e..934f777f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -23,4 +23,5 @@ const ( PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" PTProcessorTopology PropertyType = "ProcessorTopology" PTCPUGroup PropertyType = "CpuGroup" + PTSystemGUID PropertyType = "SystemGUID" ) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go index 83486994..9cca8517 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -1,16 +1,18 @@ +// Autogenerated code; DO NOT EDIT. 
+ /* - * HCS API + * Schema Open API * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.4 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema type Topology struct { - Memory *Memory2 `json:"Memory,omitempty"` - - Processor *Processor2 `json:"Processor,omitempty"` + Memory *VirtualMachineMemory `json:"Memory,omitempty"` + Processor *VirtualMachineProcessor `json:"Processor,omitempty"` + Numa *Numa `json:"Numa,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go index 1e0fab28..3f750466 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -1,36 +1,29 @@ +// Autogenerated code; DO NOT EDIT. + /* - * HCS API + * Schema Open API * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.4 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema +// Configuration of a virtual machine, used during its creation to set up and/or use resources. type VirtualMachine struct { - - // StopOnReset is private in the schema. If regenerated need to put back. - StopOnReset bool `json:"StopOnReset,omitempty"` - - Chipset *Chipset `json:"Chipset,omitempty"` - - ComputeTopology *Topology `json:"ComputeTopology,omitempty"` - - Devices *Devices `json:"Devices,omitempty"` - - GuestState *GuestState `json:"GuestState,omitempty"` - - RestoreState *RestoreState `json:"RestoreState,omitempty"` - + Version *Version `json:"Version,omitempty"` + // When set to true, the virtual machine will treat a reset as a stop, releasing resources and cleaning up state. + StopOnReset bool `json:"StopOnReset,omitempty"` + Chipset *Chipset `json:"Chipset,omitempty"` + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + Devices *Devices `json:"Devices,omitempty"` + GuestState *GuestState `json:"GuestState,omitempty"` + RestoreState *RestoreState `json:"RestoreState,omitempty"` RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` - - StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` - + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` - - SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` - - DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` + SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go new file mode 100644 index 00000000..17573c92 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go @@ -0,0 +1,33 @@ +// Autogenerated code; DO NOT EDIT. 
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type VirtualMachineMemory struct {
+	SizeInMB uint64 `json:"SizeInMB,omitempty"`
+	Backing *MemoryBackingType `json:"Backing,omitempty"`
+	// If enabled, then the VM's memory is backed by the Windows pagefile rather than physically backed, statically allocated memory.
+	AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
+	// If enabled, then the memory hot hint feature is exposed to the VM, allowing it to prefetch pages into its working set (if supported by the guest operating system).
+	EnableHotHint bool `json:"EnableHotHint,omitempty"`
+	// If enabled, then the memory cold hint feature is exposed to the VM, allowing it to trim zeroed pages from its working set (if supported by the guest operating system).
+	EnableColdHint bool `json:"EnableColdHint,omitempty"`
+	// If enabled, then the memory cold discard hint feature is exposed to the VM, allowing it to trim non-zeroed pages from the working set (if supported by the guest operating system).
+	EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
+	// If enabled, then commit is not charged for each backing page until first access.
+	EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
+	// Low MMIO region allocated below 4GB
+	LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
+	// High MMIO region allocated above 4GB (base and size)
+	HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
+	HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
+	SlitType *VirtualSlitType `json:"SlitType,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go
new file mode 100644
index 00000000..619cd834
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type VirtualMachineProcessor struct {
+	Count uint32 `json:"Count,omitempty"`
+	Limit uint64 `json:"Limit,omitempty"`
+	Weight uint64 `json:"Weight,omitempty"`
+	Reservation uint64 `json:"Reservation,omitempty"`
+	CpuGroup *CpuGroup `json:"CpuGroup,omitempty"`
+	NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"`
+}
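Before the VirtualPciDevice change just below, it is worth seeing how these regenerated pieces compose: Topology (updated earlier in this patch) now nests VirtualMachineMemory, VirtualMachineProcessor, and the new Numa settings. A hedged sketch with illustrative values only, not code from the patch:

```go
topology := &hcsschema.Topology{
	Memory: &hcsschema.VirtualMachineMemory{
		SizeInMB:        4096,
		AllowOvercommit: true,
	},
	Processor: &hcsschema.VirtualMachineProcessor{
		Count: 2,
	},
	Numa: &hcsschema.Numa{
		VirtualNodeCount: 2,
		Settings: []hcsschema.NumaSetting{{
			VirtualNodeNumber:   0,
			PhysicalNodeNumber:  0,
			VirtualSocketNumber: 0,
			CountOfProcessors:   1,
			CountOfMemoryBlocks: 2048,
			MemoryBackingType:   hcsschema.MemoryBackingType_PHYSICAL,
		}},
	},
}
```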
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go index f5e05903..a4a62da1 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go @@ -9,8 +9,9 @@ package hcsschema -// TODO: This is pre-release support in schema 2.3. Need to add build number +// TODO: PropagateNumaAffinity is a pre-release/experimental field in schema 2.11. Need to add build number // docs when a public build with this is out. type VirtualPciDevice struct { Functions []VirtualPciFunction `json:",omitempty"` + PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go new file mode 100644 index 00000000..dfad6231 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go @@ -0,0 +1,23 @@ +// Autogenerated code; DO NOT EDIT. + +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// VirtualSlitType : Indicates if a virtual SLIT should be enabled for a VM and the type of virtual SLIT to be enabled. +type VirtualSlitType string + +// List of VirtualSlitType +const ( + VirtualSlitType_NONE VirtualSlitType = "None" + VirtualSlitType_FIRMWARE VirtualSlitType = "Firmware" + VirtualSlitType_MEASURED VirtualSlitType = "Measured" + VirtualSlitType_FIRMWARE_FALLBACK_MEASURED VirtualSlitType = "FirmwareFallbackMeasured" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go index 8ed7e566..ee85c43b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go @@ -13,4 +13,6 @@ type WindowsCrashReporting struct { DumpFileName string `json:"DumpFileName,omitempty"` MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` + + DumpType string `json:"DumpType,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 81d60ed4..b1597466 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -238,9 +238,10 @@ func (computeSystem *System) Shutdown(ctx context.Context) error { resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { //nolint:errorlint - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: + if err != nil && + !errors.Is(err, ErrVmcomputeAlreadyStopped) && + !errors.Is(err, ErrComputeSystemDoesNotExist) && + !errors.Is(err, ErrVmcomputeOperationPending) { return makeSystemError(computeSystem, operation, err, events) } return nil @@ -259,9 +260,10 @@ func (computeSystem *System) Terminate(ctx context.Context) error { resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { //nolint:errorlint - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: + if err != nil && + !errors.Is(err, ErrVmcomputeAlreadyStopped) && + !errors.Is(err, ErrComputeSystemDoesNotExist) && + !errors.Is(err, ErrVmcomputeOperationPending) { return makeSystemError(computeSystem, operation, err, events) } return nil @@ -279,14 +281,13 @@ func (computeSystem *System) waitBackground() { span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) err := waitForNotification(ctx, computeSystem.callbackNumber,
hcsNotificationSystemExited, nil) - switch err { //nolint:errorlint - case nil: + if err == nil { log.G(ctx).Debug("system exited") - case ErrVmcomputeUnexpectedExit: + } else if errors.Is(err, ErrVmcomputeUnexpectedExit) { log.G(ctx).Debug("unexpected system exit") computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) err = nil - default: + } else { err = makeSystemError(computeSystem, operation, err, nil) } computeSystem.closedWaitOnce.Do(func() { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go index 82ca5bae..4b1e51cb 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go @@ -47,7 +47,7 @@ func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMac func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { operation := "Get" title := "hcsshim::nnvManagementMacList::" + operation - logrus.Debugf(title) + logrus.Debug(title) return HNSNnvManagementMacRequest("GET", "", "") } @@ -55,6 +55,6 @@ func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { operation := "Delete" title := "hcsshim::nnvManagementMacList::" + operation - logrus.Debugf(title) + logrus.Debug(title) return HNSNnvManagementMacRequest("DELETE", "", "") } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index b505731c..3afa240a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -22,9 +22,8 @@ import ( // of the job and a mutex for synchronized handle access. type JobObject struct { handle windows.Handle - // All accesses to this MUST be done atomically except in `Open` as the object - // is being created in the function. 1 signifies that this job is currently a silo. - silo uint32 + // silo indicates whether this job is currently a silo. + silo atomic.Bool mq *queue.MessageQueue handleLock sync.RWMutex } @@ -204,9 +203,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { handle: jobHandle, } - if isJobSilo(jobHandle) { - job.silo = 1 - } + job.silo.Store(isJobSilo(jobHandle)) // If the IOCP we'll be using to receive messages for all jobs hasn't been // created, create it and start polling. @@ -479,7 +476,7 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error return ErrAlreadyClosed } - if !job.isSilo() { + if !job.silo.Load() { return ErrNotSilo } @@ -546,7 +543,7 @@ func (job *JobObject) PromoteToSilo() error { return ErrAlreadyClosed } - if job.isSilo() { + if job.silo.Load() { return nil } @@ -569,15 +566,10 @@ func (job *JobObject) PromoteToSilo() error { return fmt.Errorf("failed to promote job to silo: %w", err) } - atomic.StoreUint32(&job.silo, 1) + job.silo.Store(true) return nil } -// isSilo returns if the job object is a silo. -func (job *JobObject) isSilo() bool { - return atomic.LoadUint32(&job.silo) == 1 -} - // QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the // private working set for every process running in the job. func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
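The jobobject.go hunk above (and the log/scrub.go hunk further down) replaces a hand-rolled atomic uint32 flag with sync/atomic's atomic.Bool, available since Go 1.19. A minimal sketch of the pattern outside hcsshim (the jobObject type and method names here are stand-ins, not hcsshim's API):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// jobObject is a stand-in for hcsshim's JobObject; only the flag matters here.
type jobObject struct {
	// silo replaces a uint32 that needed atomic.LoadUint32/StoreUint32
	// and a manual 0/1 <-> bool translation at every call site.
	silo atomic.Bool
}

func (j *jobObject) promoteToSilo() {
	j.silo.Store(true) // was: atomic.StoreUint32(&j.silo, 1)
}

func (j *jobObject) isSilo() bool {
	return j.silo.Load() // was: atomic.LoadUint32(&j.silo) == 1
}

func main() {
	var j jobObject // the zero value is ready to use: Load reports false
	fmt.Println(j.isSilo())
	j.promoteToSilo()
	fmt.Println(j.isSilo())
}
```

The zero value being directly usable is what lets both call sites drop their explicit initialization branches.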
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go index e3b1a1ed..fedf8add 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -150,6 +150,7 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr) } + // CodeQL [SM03681] checked against max value above (there is no math.MaxUintPtr ...) info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) return job.setExtendedInformation(info) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/context.go b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go index d17d909d..4399cec6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/context.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go @@ -4,7 +4,6 @@ import ( "context" "github.com/sirupsen/logrus" - "go.opencensus.io/trace" ) type entryContextKeyType int @@ -20,13 +19,13 @@ var ( // Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`. L = logrus.NewEntry(logrus.StandardLogger()) - // G is an alias for GetEntry + // G is an alias for GetEntry. G = GetEntry - // S is an alias for SetEntry + // S is an alias for SetEntry. S = SetEntry - // U is an alias for UpdateContext + // U is an alias for UpdateContext. U = UpdateContext ) @@ -83,7 +82,7 @@ func UpdateContext(ctx context.Context) context.Context { // WithContext returns a context that contains the provided log entry. // The entry can be extracted with `GetEntry` (`G`) // -// The entry in the context is a copy of `entry` (generated by `entry.WithContext`) +// The entry in the context is a copy of `entry` (generated by `entry.WithContext`). func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) { // regardless of the order, entry.Context != GetEntry(ctx) // here, the returned entry will reference the supplied context @@ -93,25 +92,6 @@ func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *lo return ctx, entry } -// Copy extracts the tracing Span and logging entry from the src Context, if they -// exist, and adds them to the dst Context. -// -// This is useful to share tracing and logging between contexts, but not the -// cancellation. For example, if the src Context has been cancelled but cleanup -// operations triggered by the cancellation require a non-cancelled context to -// execute. -func Copy(dst context.Context, src context.Context) context.Context { - if s := trace.FromContext(src); s != nil { - dst = trace.NewContext(dst, s) - } - - if e := fromContext(src); e != nil { - dst, _ = WithContext(dst, e) - } - - return dst -} - func fromContext(ctx context.Context) *logrus.Entry { e, _ := ctx.Value(_entryContextKey).(*logrus.Entry) return e diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go index 1ceb26ba..f26316fa 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -103,9 +103,7 @@ func encode(v interface{}) (_ []byte, err error) { if jErr := enc.Encode(v); jErr != nil { if err != nil { - // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
- //nolint:errorlint // non-wrapping format verb for fmt.Errorf - return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr) + return nil, fmt.Errorf("protojson encoding: %w; json encoding: %w", err, jErr) } return nil, fmt.Errorf("json encoding: %w", jErr) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go index 5a960e0d..5346f9b7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -22,23 +22,14 @@ var ( // case sensitive keywords, so "env" is not a substring on "Environment" _scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")} - _scrub int32 + _scrub atomic.Bool ) // SetScrubbing enables scrubbing -func SetScrubbing(enable bool) { - v := int32(0) // cant convert from bool to int32 directly - if enable { - v = 1 - } - atomic.StoreInt32(&_scrub, v) -} +func SetScrubbing(enable bool) { _scrub.Store(enable) } // IsScrubbingEnabled checks if scrubbing is enabled -func IsScrubbingEnabled() bool { - v := atomic.LoadInt32(&_scrub) - return v != 0 -} +func IsScrubbingEnabled() bool { return _scrub.Load() } // ScrubProcessParameters scrubs HCS Create Process requests with config parameters of // type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index 67ca897c..965086a5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error }() select { case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint + if ctx.Err() == gcontext.DeadlineExceeded { log.G(ctx).WithField(logfields.Timeout, trueTimeout). Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index fc12eeba..627060ce 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -34,6 +34,7 @@ const ( UtilityVMPath = `UtilityVM` UtilityVMFilesPath = `UtilityVM\Files` RegFilesPath = `Files\Windows\System32\config` + BootDirRelativePath = `\EFI\Microsoft\Boot` BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD` BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi` ContainerBaseVhd = `blank-base.vhdx` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go index 21664577..6c026d98 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -32,10 +32,16 @@ type CimFsFileMetadata struct { EACount uint32 } +type CimFsImagePath struct { + ImageDir *uint16 + ImageName *uint16 +} + //sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage? //sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? 
//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage? +//sys CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage2? //sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage? //sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage? @@ -45,3 +51,8 @@ type CimFsFileMetadata struct { //sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath? //sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink? //sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream? +//sys CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimAddFsToMergedImage? +//sys CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) = cimfs.CimAddFsToMergedImage2? +//sys CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) = cimfs.CimMergeMountImage? +//sys CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimTombstoneFile? +//sys CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateMergeLink? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index ecdded31..2abdc2e0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -53,6 +53,8 @@ var ( procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCimAddFsToMergedImage = modcimfs.NewProc("CimAddFsToMergedImage") + procCimAddFsToMergedImage2 = modcimfs.NewProc("CimAddFsToMergedImage2") procCimCloseImage = modcimfs.NewProc("CimCloseImage") procCimCloseStream = modcimfs.NewProc("CimCloseStream") procCimCommitImage = modcimfs.NewProc("CimCommitImage") @@ -60,9 +62,13 @@ var ( procCimCreateFile = modcimfs.NewProc("CimCreateFile") procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink") procCimCreateImage = modcimfs.NewProc("CimCreateImage") + procCimCreateImage2 = modcimfs.NewProc("CimCreateImage2") + procCimCreateMergeLink = modcimfs.NewProc("CimCreateMergeLink") procCimDeletePath = modcimfs.NewProc("CimDeletePath") procCimDismountImage = modcimfs.NewProc("CimDismountImage") + procCimMergeMountImage = modcimfs.NewProc("CimMergeMountImage") procCimMountImage = modcimfs.NewProc("CimMountImage") + procCimTombstoneFile = modcimfs.NewProc("CimTombstoneFile") procCimWriteStream = modcimfs.NewProc("CimWriteStream") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") @@ -181,6 +187,54 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr return } +func CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimAddFsToMergedImage(cimFSHandle, _p0) +} + +func _CimAddFsToMergedImage(cimFSHandle FsHandle, path *uint16) (hr 
error) { + hr = procCimAddFsToMergedImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimAddFsToMergedImage2(cimFSHandle, _p0, flags) +} + +func _CimAddFsToMergedImage2(cimFSHandle FsHandle, path *uint16, flags uint32) (hr error) { + hr = procCimAddFsToMergedImage2.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage2.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(flags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimCloseImage(cimFSHandle FsHandle) (err error) { err = procCimCloseImage.Find() if err != nil { @@ -321,6 +375,59 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci return } +func CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(imagePath) + if hr != nil { + return + } + return _CimCreateImage2(_p0, flags, oldFSName, newFSName, cimFSHandle) +} + +func _CimCreateImage2(imagePath *uint16, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + hr = procCimCreateImage2.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimCreateImage2.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(flags), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(newPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(oldPath) + if hr != nil { + return + } + return _CimCreateMergeLink(cimFSHandle, _p0, _p1) +} + +func _CimCreateMergeLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) { + hr = procCimCreateMergeLink.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimCreateMergeLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(path) @@ -360,6 +467,21 @@ func CimDismountImage(volumeID *g) (hr error) { return } +func CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) { + hr = procCimMergeMountImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimMergeMountImage.Addr(), uintptr(numCimPaths), uintptr(unsafe.Pointer(backingImagePaths)), uintptr(flags), uintptr(unsafe.Pointer(volumeID))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimMountImage(imagePath string, fsName string, flags 
uint32, volumeID *g) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(imagePath) if hr != nil { return } @@ -389,6 +511,30 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g return } +func CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimTombstoneFile(cimFSHandle, _p0) +} + +func _CimTombstoneFile(cimFSHandle FsHandle, path *uint16) (hr error) { + hr = procCimTombstoneFile.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimTombstoneFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) { hr = procCimWriteStream.Find() if hr != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go index f8d411ad..a7860895 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go @@ -3,7 +3,8 @@ package osversion // List of stable ABI compliant ltsc releases // Note: List must be sorted in ascending order var compatLTSCReleases = []uint16{ - V21H2Server, + LTSC2022, + LTSC2025, } // CheckHostAndContainerCompat checks if given host and container @@ -20,16 +21,25 @@ func CheckHostAndContainerCompat(host, ctr OSVersion) bool { } // If host is < WS 2022, exact version match is required - if host.Build < V21H2Server { + if host.Build < LTSC2022 { return host.Build == ctr.Build } - var supportedLtscRelease uint16 + // Find the latest LTSC version that is earlier than the host version. + // This is the earliest container version that the host can run. + // + // If the host version is an LTSC, then it supports compatibility with + // everything from the previous LTSC up to itself, so we want supportedLTSCRelease + // to be the previous entry. + // + // If no match is found, then we know that the host is LTSC2022 exactly, + // since we already checked that it's not less than LTSC2022. + var supportedLTSCRelease uint16 = LTSC2022 for i := len(compatLTSCReleases) - 1; i >= 0; i-- { - if host.Build >= compatLTSCReleases[i] { - supportedLtscRelease = compatLTSCReleases[i] + if host.Build > compatLTSCReleases[i] { + supportedLTSCRelease = compatLTSCReleases[i] break } } - return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build + return supportedLTSCRelease <= ctr.Build && ctr.Build <= host.Build }
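The rewritten CheckHostAndContainerCompat scans compatLTSCReleases from newest to oldest and picks the latest LTSC build strictly below the host, falling back to LTSC2022. A self-contained sketch of that search using the build numbers in this patch (20348 for LTSC2022, 26100 for LTSC2025; the helper name and standalone packaging are illustrative, not hcsshim's API):

```go
package main

import "fmt"

// Build numbers for the osversion constants referenced in the hunk above.
const (
	ltsc2022 uint16 = 20348
	ltsc2025 uint16 = 26100
)

// Sorted ascending, mirroring compatLTSCReleases.
var compatLTSCReleases = []uint16{ltsc2022, ltsc2025}

// supportedLTSC mirrors the search loop: the latest LTSC strictly below
// the host build, defaulting to LTSC2022 when no earlier entry matches.
func supportedLTSC(hostBuild uint16) uint16 {
	supported := ltsc2022
	for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
		if hostBuild > compatLTSCReleases[i] {
			supported = compatLTSCReleases[i]
			break
		}
	}
	return supported
}

func main() {
	// A 23H2 host (build 25398) can run containers from LTSC2022 up to itself:
	fmt.Println(supportedLTSC(25398)) // 20348
	// An LTSC2025 host reaches back to the previous LTSC:
	fmt.Println(supportedLTSC(26100)) // 20348
	// A host newer than LTSC2025 only reaches back to LTSC2025:
	fmt.Println(supportedLTSC(27000)) // 26100
}
```

The strict `>` comparison is what makes an exact-LTSC host fall through to the previous entry, matching the comment in the hunk.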
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index 44636959..5392a4ce 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -81,4 +81,11 @@ const ( // V22H2Win11 corresponds to Windows 11 (2022 Update). V22H2Win11 = 22621 + + // V23H2 is the 23H2 release in the Windows Server annual channel. + V23H2 = 25398 + + // Windows Server 2025 build 26100 + V25H1Server = 26100 + LTSC2025 = V25H1Server ) diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore deleted file mode 100644 index 8d69a941..00000000 --- a/vendor/github.com/asaskevich/govalidator/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -bin/ -.idea/ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml deleted file mode 100644 index bb83c667..00000000 --- a/vendor/github.com/asaskevich/govalidator/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -dist: xenial -go: - - '1.10' - - '1.11' - - '1.12' - - '1.13' - - 'tip' - -script: - - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md deleted file mode 100644 index 4b462b0d..00000000 --- a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor Code of Conduct - -This project adheres to [The Code Manifesto](http://codemanifesto.com) -as its guidelines for contributor interactions. - -## The Code Manifesto - -We want to work in an ecosystem that empowers developers to reach their -potential — one that encourages growth and effective collaboration. A space -that is safe for all. - -A space such as this benefits everyone that participates in it. It encourages -new developers to enter our field. It is through discussion and collaboration -that we grow, and through growth that we improve. - -In the effort to create such a place, we hold to these values: - -1. **Discrimination limits us.** This includes discrimination on the basis of - race, gender, sexual orientation, gender identity, age, nationality, - technology and any other arbitrary exclusion of a group of people. -2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort - levels. Remember that, and if brought to your attention, heed it. -3. **We are our biggest assets.** None of us were born masters of our trade. - Each of us has been helped along the way. Return that favor, when and where - you can. -4. **We are resources for the future.** As an extension of #3, share what you - know. Make yourself a resource to help those that come after you. -5. **Respect defines us.** Treat others as you wish to be treated. Make your - discussions, criticisms and debates from a position of respectfulness. Ask - yourself, is it true? Is it necessary? Is it constructive? Anything less is - unacceptable. -6. **Reactions require grace.** Angry responses are valid, but abusive language - and vindictive actions are toxic. When something happens that offends you, - handle it assertively, but be respectful. Escalate reasonably, and try to - allow the offender an opportunity to explain themselves, and possibly - correct the issue. -7. **Opinions are just that: opinions.** Each and every one of us, due to our - background and upbringing, have varying opinions. That is perfectly - acceptable. Remember this: if you respect your own opinions, you should - respect the opinions of others. -8.
**To err is human.** You might not intend it, but mistakes do happen and - contribute to build experience. Tolerate honest mistakes, and don't - hesitate to apologize if you make one yourself. diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md deleted file mode 100644 index 7ed268a1..00000000 --- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. - -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Financial contributions - -We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). -Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. - - -## Credits - - -### Contributors - -Thank you to all the people who have already contributed to govalidator! - - - -### Backers - -Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Thank you to all our sponsors! 
(please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE deleted file mode 100644 index cacba910..00000000 --- a/vendor/github.com/asaskevich/govalidator/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2020 Alex Saskevich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md deleted file mode 100644 index 2c3fc35e..00000000 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ /dev/null @@ -1,622 +0,0 @@ -govalidator -=========== -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) -[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) - -A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). - -#### Installation -Make sure that Go is installed on your computer. 
-Type the following command in your terminal: - - go get github.com/asaskevich/govalidator - -or you can get specified release of the package with `gopkg.in`: - - go get gopkg.in/asaskevich/govalidator.v10 - -After it the package is ready to use. - - -#### Import package in your project -Add following line in your `*.go` file: -```go -import "github.com/asaskevich/govalidator" -``` -If you are unhappy to use long `govalidator`, you can do something like this: -```go -import ( - valid "github.com/asaskevich/govalidator" -) -``` - -#### Activate behavior to require all fields have a validation tag by default -`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. - -`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. - -```go -import "github.com/asaskevich/govalidator" - -func init() { - govalidator.SetFieldsRequiredByDefault(true) -} -``` - -Here's some code to explain it: -```go -// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -type exampleStruct struct { - Name string `` - Email string `valid:"email"` -} - -// this, however, will only fail when Email is empty or an invalid email address: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email"` -} - -// lastly, this will only fail when Email is an invalid email address but not when it's empty: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email,optional"` -} -``` - -#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) -##### Custom validator function signature -A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. -```go -import "github.com/asaskevich/govalidator" - -// old signature -func(i interface{}) bool - -// new signature -func(i interface{}, o interface{}) bool -``` - -##### Adding a custom validator -This was changed to prevent data races when accessing custom validators. -```go -import "github.com/asaskevich/govalidator" - -// before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { - // ... -} - -// after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { - // ... 
-}) -``` - -#### List of functions: -```go -func Abs(value float64) float64 -func BlackList(str, chars string) string -func ByteLength(str string, params ...string) bool -func CamelCaseToUnderscore(str string) string -func Contains(str, substring string) bool -func Count(array []interface{}, iterator ConditionIterator) int -func Each(array []interface{}, iterator Iterator) -func ErrorByField(e error, field string) string -func ErrorsByField(e error) map[string]string -func Filter(array []interface{}, iterator ConditionIterator) []interface{} -func Find(array []interface{}, iterator ConditionIterator) interface{} -func GetLine(s string, index int) (string, error) -func GetLines(s string) []string -func HasLowerCase(str string) bool -func HasUpperCase(str string) bool -func HasWhitespace(str string) bool -func HasWhitespaceOnly(str string) bool -func InRange(value interface{}, left interface{}, right interface{}) bool -func InRangeFloat32(value, left, right float32) bool -func InRangeFloat64(value, left, right float64) bool -func InRangeInt(value, left, right interface{}) bool -func IsASCII(str string) bool -func IsAlpha(str string) bool -func IsAlphanumeric(str string) bool -func IsBase64(str string) bool -func IsByteLength(str string, min, max int) bool -func IsCIDR(str string) bool -func IsCRC32(str string) bool -func IsCRC32b(str string) bool -func IsCreditCard(str string) bool -func IsDNSName(str string) bool -func IsDataURI(str string) bool -func IsDialString(str string) bool -func IsDivisibleBy(str, num string) bool -func IsEmail(str string) bool -func IsExistingEmail(email string) bool -func IsFilePath(str string) (bool, int) -func IsFloat(str string) bool -func IsFullWidth(str string) bool -func IsHalfWidth(str string) bool -func IsHash(str string, algorithm string) bool -func IsHexadecimal(str string) bool -func IsHexcolor(str string) bool -func IsHost(str string) bool -func IsIP(str string) bool -func IsIPv4(str string) bool -func IsIPv6(str string) bool -func IsISBN(str string, version int) bool -func IsISBN10(str string) bool -func IsISBN13(str string) bool -func IsISO3166Alpha2(str string) bool -func IsISO3166Alpha3(str string) bool -func IsISO4217(str string) bool -func IsISO693Alpha2(str string) bool -func IsISO693Alpha3b(str string) bool -func IsIn(str string, params ...string) bool -func IsInRaw(str string, params ...string) bool -func IsInt(str string) bool -func IsJSON(str string) bool -func IsLatitude(str string) bool -func IsLongitude(str string) bool -func IsLowerCase(str string) bool -func IsMAC(str string) bool -func IsMD4(str string) bool -func IsMD5(str string) bool -func IsMagnetURI(str string) bool -func IsMongoID(str string) bool -func IsMultibyte(str string) bool -func IsNatural(value float64) bool -func IsNegative(value float64) bool -func IsNonNegative(value float64) bool -func IsNonPositive(value float64) bool -func IsNotNull(str string) bool -func IsNull(str string) bool -func IsNumeric(str string) bool -func IsPort(str string) bool -func IsPositive(value float64) bool -func IsPrintableASCII(str string) bool -func IsRFC3339(str string) bool -func IsRFC3339WithoutZone(str string) bool -func IsRGBcolor(str string) bool -func IsRegex(str string) bool -func IsRequestURI(rawurl string) bool -func IsRequestURL(rawurl string) bool -func IsRipeMD128(str string) bool -func IsRipeMD160(str string) bool -func IsRsaPub(str string, params ...string) bool -func IsRsaPublicKey(str string, keylen int) bool -func IsSHA1(str string) bool -func IsSHA256(str string) bool -func 
IsSHA384(str string) bool -func IsSHA512(str string) bool -func IsSSN(str string) bool -func IsSemver(str string) bool -func IsTiger128(str string) bool -func IsTiger160(str string) bool -func IsTiger192(str string) bool -func IsTime(str string, format string) bool -func IsType(v interface{}, params ...string) bool -func IsURL(str string) bool -func IsUTFDigit(str string) bool -func IsUTFLetter(str string) bool -func IsUTFLetterNumeric(str string) bool -func IsUTFNumeric(str string) bool -func IsUUID(str string) bool -func IsUUIDv3(str string) bool -func IsUUIDv4(str string) bool -func IsUUIDv5(str string) bool -func IsULID(str string) bool -func IsUnixTime(str string) bool -func IsUpperCase(str string) bool -func IsVariableWidth(str string) bool -func IsWhole(value float64) bool -func LeftTrim(str, chars string) string -func Map(array []interface{}, iterator ResultIterator) []interface{} -func Matches(str, pattern string) bool -func MaxStringLength(str string, params ...string) bool -func MinStringLength(str string, params ...string) bool -func NormalizeEmail(str string) (string, error) -func PadBoth(str string, padStr string, padLen int) string -func PadLeft(str string, padStr string, padLen int) string -func PadRight(str string, padStr string, padLen int) string -func PrependPathToErrors(err error, path string) error -func Range(str string, params ...string) bool -func RemoveTags(s string) string -func ReplacePattern(str, pattern, replace string) string -func Reverse(s string) string -func RightTrim(str, chars string) string -func RuneLength(str string, params ...string) bool -func SafeFileName(str string) string -func SetFieldsRequiredByDefault(value bool) -func SetNilPtrAllowedByRequired(value bool) -func Sign(value float64) float64 -func StringLength(str string, params ...string) bool -func StringMatches(s string, params ...string) bool -func StripLow(str string, keepNewLines bool) string -func ToBoolean(str string) (bool, error) -func ToFloat(str string) (float64, error) -func ToInt(value interface{}) (res int64, err error) -func ToJSON(obj interface{}) (string, error) -func ToString(obj interface{}) string -func Trim(str, chars string) string -func Truncate(str string, length int, ending string) string -func TruncatingErrorf(str string, args ...interface{}) error -func UnderscoreToCamelCase(s string) string -func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) -func ValidateStruct(s interface{}) (bool, error) -func WhiteList(str, chars string) string -type ConditionIterator -type CustomTypeValidator -type Error -func (e Error) Error() string -type Errors -func (es Errors) Error() string -func (es Errors) Errors() []error -type ISO3166Entry -type ISO693Entry -type InterfaceParamValidator -type Iterator -type ParamValidator -type ResultIterator -type UnsupportedTypeError -func (e *UnsupportedTypeError) Error() string -type Validator -``` - -#### Examples -###### IsURL -```go -println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) -``` -###### IsType -```go -println(govalidator.IsType("Bob", "string")) -println(govalidator.IsType(1, "int")) -i := 1 -println(govalidator.IsType(&i, "*int")) -``` - -IsType can be used through the tag `type` which is essential for map validation: -```go -type User struct { - Name string `valid:"type(string)"` - Age int `valid:"type(int)"` - Meta interface{} `valid:"type(string)"` -} -result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) -if err != nil { - println("error: " + 
err.Error()) -} -println(result) -``` -###### ToString -```go -type User struct { - FirstName string - LastName string -} - -str := govalidator.ToString(&User{"John", "Juan"}) -println(str) -``` -###### Each, Map, Filter, Count for slices -Each iterates over the slice/array and calls Iterator for every item -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.Iterator = func(value interface{}, index int) { - println(value.(int)) -} -govalidator.Each(data, fn) -``` -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { - return value.(int) * 3 -} -_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} -``` -```go -data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} -var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { - return value.(int)%2 == 0 -} -_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} -_ = govalidator.Count(data, fn) // result = 5 -``` -###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) -If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: -```go -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) -``` -For completely custom validators (interface-based), see below. - -Here is a list of available validators for struct fields (validator - used function): -```go -"email": IsEmail, -"url": IsURL, -"dialstring": IsDialString, -"requrl": IsRequestURL, -"requri": IsRequestURI, -"alpha": IsAlpha, -"utfletter": IsUTFLetter, -"alphanum": IsAlphanumeric, -"utfletternum": IsUTFLetterNumeric, -"numeric": IsNumeric, -"utfnumeric": IsUTFNumeric, -"utfdigit": IsUTFDigit, -"hexadecimal": IsHexadecimal, -"hexcolor": IsHexcolor, -"rgbcolor": IsRGBcolor, -"lowercase": IsLowerCase, -"uppercase": IsUpperCase, -"int": IsInt, -"float": IsFloat, -"null": IsNull, -"uuid": IsUUID, -"uuidv3": IsUUIDv3, -"uuidv4": IsUUIDv4, -"uuidv5": IsUUIDv5, -"creditcard": IsCreditCard, -"isbn10": IsISBN10, -"isbn13": IsISBN13, -"json": IsJSON, -"multibyte": IsMultibyte, -"ascii": IsASCII, -"printableascii": IsPrintableASCII, -"fullwidth": IsFullWidth, -"halfwidth": IsHalfWidth, -"variablewidth": IsVariableWidth, -"base64": IsBase64, -"datauri": IsDataURI, -"ip": IsIP, -"port": IsPort, -"ipv4": IsIPv4, -"ipv6": IsIPv6, -"dns": IsDNSName, -"host": IsHost, -"mac": IsMAC, -"latitude": IsLatitude, -"longitude": IsLongitude, -"ssn": IsSSN, -"semver": IsSemver, -"rfc3339": IsRFC3339, -"rfc3339WithoutZone": IsRFC3339WithoutZone, -"ISO3166Alpha2": IsISO3166Alpha2, -"ISO3166Alpha3": IsISO3166Alpha3, -"ulid": IsULID, -``` -Validators with parameters - -```go -"range(min|max)": Range, -"length(min|max)": ByteLength, -"runelength(min|max)": RuneLength, -"stringlength(min|max)": StringLength, -"matches(pattern)": StringMatches, -"in(string1|string2|...|stringN)": IsIn, -"rsapub(keylength)" : IsRsaPub, -"minstringlength(int): MinStringLength, -"maxstringlength(int): MaxStringLength, -``` -Validators with parameters for any type - -```go -"type(type)": IsType, -``` - -And here is small example of usage: -```go -type Post struct { - Title string `valid:"alphanum,required"` - Message string `valid:"duck,ascii"` - Message2 string `valid:"animal(dog)"` - AuthorIP 
string `valid:"ipv4"` - Date string `valid:"-"` -} -post := &Post{ - Title: "My Example Post", - Message: "duck", - Message2: "dog", - AuthorIP: "123.234.54.3", -} - -// Add your own struct validation tags -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) - -// Add your own struct validation tags with parameter -govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { - species := params[0] - return str == species -}) -govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") - -result, err := govalidator.ValidateStruct(post) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` -###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) -If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` - -So here is small example of usage: -```go -var mapTemplate = map[string]interface{}{ - "name":"required,alpha", - "family":"required,alpha", - "email":"required,email", - "cell-phone":"numeric", - "address":map[string]interface{}{ - "line1":"required,alphanum", - "line2":"alphanum", - "postal-code":"numeric", - }, -} - -var inputMap = map[string]interface{}{ - "name":"Bob", - "family":"Smith", - "email":"foo@bar.baz", - "address":map[string]interface{}{ - "line1":"", - "line2":"", - "postal-code":"", - }, -} - -result, err := govalidator.ValidateMap(inputMap, mapTemplate) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` - -###### WhiteList -```go -// Remove all characters from string ignoring characters between "a" and "z" -println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") -``` - -###### Custom validation functions -Custom validation using your own domain specific validators is also available - here's an example of how to use it: -```go -import "github.com/asaskevich/govalidator" - -type CustomByteArray [6]byte // custom types are supported and can be validated - -type StructWithCustomByteArray struct { - ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence - Email string `valid:"email"` - CustomMinLength int `valid:"-"` -} - -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // you can type switch on the context interface being validated - case StructWithCustomByteArray: - // you can check and validate against some other field in the context, - // return early or not validate against the context at all – your choice - case SomeOtherType: - // ... - default: - // expecting some other type? Throw/panic here or continue - } - - switch v := i.(type) { // type switch on the struct field being validated - case CustomByteArray: - for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes - if e != 0 { - return true - } - } - } - return false -}) -govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // this validates a field against the value in another field, i.e. 
dependent validation - case StructWithCustomByteArray: - return len(v.ID) >= v.CustomMinLength - } - return false -}) -``` - -###### Loop over Error() -By default .Error() returns all errors in a single String. To access each error you can do this: -```go - if err != nil { - errs := err.(govalidator.Errors).Errors() - for _, e := range errs { - fmt.Println(e.Error()) - } - } -``` - -###### Custom error messages -Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: -```go -type Ticket struct { - Id int64 `json:"id"` - FirstName string `json:"firstname" valid:"required~First name is blank"` -} -``` - -#### Notes -Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). -Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). - -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. - -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Credits -### Contributors - -This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. - -#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) -* [Daniel Lohse](https://github.com/annismckenzie) -* [Attila Oláh](https://github.com/attilaolah) -* [Daniel Korner](https://github.com/Dadie) -* [Steven Wilkin](https://github.com/stevenwilkin) -* [Deiwin Sarjas](https://github.com/deiwin) -* [Noah Shibley](https://github.com/slugmobile) -* [Nathan Davies](https://github.com/nathj07) -* [Matt Sanford](https://github.com/mzsanford) -* [Simon ccl1115](https://github.com/ccl1115) - - - - -### Backers - -Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Support this project by becoming a sponsor. Your logo will show up here with a link to your website. 
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)] - - - - - - - - - - - - - - - -## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go deleted file mode 100644 index 3e1da7cb..00000000 --- a/vendor/github.com/asaskevich/govalidator/arrays.go +++ /dev/null @@ -1,87 +0,0 @@ -package govalidator - -// Iterator is the function that accepts element of slice/array and its index -type Iterator func(interface{}, int) - -// ResultIterator is the function that accepts element of slice/array and its index and returns any result -type ResultIterator func(interface{}, int) interface{} - -// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean -type ConditionIterator func(interface{}, int) bool - -// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values -type ReduceIterator func(interface{}, interface{}) interface{} - -// Some validates that any item of array corresponds to ConditionIterator. Returns boolean. -func Some(array []interface{}, iterator ConditionIterator) bool { - res := false - for index, data := range array { - res = res || iterator(data, index) - } - return res -} - -// Every validates that every item of array corresponds to ConditionIterator. Returns boolean. -func Every(array []interface{}, iterator ConditionIterator) bool { - res := true - for index, data := range array { - res = res && iterator(data, index) - } - return res -} - -// Reduce boils down a list of values into a single value by ReduceIterator -func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} { - for _, data := range array { - initialValue = iterator(initialValue, data) - } - return initialValue -} - -// Each iterates over the slice and apply Iterator to every item -func Each(array []interface{}, iterator Iterator) { - for index, data := range array { - iterator(data, index) - } -} - -// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. -func Map(array []interface{}, iterator ResultIterator) []interface{} { - var result = make([]interface{}, len(array)) - for index, data := range array { - result[index] = iterator(data, index) - } - return result -} - -// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. -func Find(array []interface{}, iterator ConditionIterator) interface{} { - for index, data := range array { - if iterator(data, index) { - return data - } - } - return nil -} - -// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. -func Filter(array []interface{}, iterator ConditionIterator) []interface{} { - var result = make([]interface{}, 0) - for index, data := range array { - if iterator(data, index) { - result = append(result, data) - } - } - return result -} - -// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. 
-func Count(array []interface{}, iterator ConditionIterator) int { - count := 0 - for index, data := range array { - if iterator(data, index) { - count = count + 1 - } - } - return count -} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go deleted file mode 100644 index d68e990f..00000000 --- a/vendor/github.com/asaskevich/govalidator/converter.go +++ /dev/null @@ -1,81 +0,0 @@ -package govalidator - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" -) - -// ToString convert the input to a string. -func ToString(obj interface{}) string { - res := fmt.Sprintf("%v", obj) - return res -} - -// ToJSON convert the input to a valid JSON string -func ToJSON(obj interface{}) (string, error) { - res, err := json.Marshal(obj) - if err != nil { - res = []byte("") - } - return string(res), err -} - -// ToFloat convert the input string to a float, or 0.0 if the input is not a float. -func ToFloat(value interface{}) (res float64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = float64(val.Int()) - case uint, uint8, uint16, uint32, uint64: - res = float64(val.Uint()) - case float32, float64: - res = val.Float() - case string: - res, err = strconv.ParseFloat(val.String(), 64) - if err != nil { - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. -func ToInt(value interface{}) (res int64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = val.Int() - case uint, uint8, uint16, uint32, uint64: - res = int64(val.Uint()) - case float32, float64: - res = int64(val.Float()) - case string: - if IsInt(val.String()) { - res, err = strconv.ParseInt(val.String(), 0, 64) - if err != nil { - res = 0 - } - } else { - err = fmt.Errorf("ToInt: invalid numeric format %g", value) - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToBoolean convert the input string to a boolean. -func ToBoolean(str string) (bool, error) { - return strconv.ParseBool(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go deleted file mode 100644 index 55dce62d..00000000 --- a/vendor/github.com/asaskevich/govalidator/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -package govalidator - -// A package of validators and sanitizers for strings, structures and collections. diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go deleted file mode 100644 index 1da2336f..00000000 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ /dev/null @@ -1,47 +0,0 @@ -package govalidator - -import ( - "sort" - "strings" -) - -// Errors is an array of multiple errors and conforms to the error interface. -type Errors []error - -// Errors returns itself. -func (es Errors) Errors() []error { - return es -} - -func (es Errors) Error() string { - var errs []string - for _, e := range es { - errs = append(errs, e.Error()) - } - sort.Strings(errs) - return strings.Join(errs, ";") -} - -// Error encapsulates a name, an error and whether there's a custom error message or not. 
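A hedged sketch of the converters being removed above (ToInt, ToFloat, ToBoolean, ToJSON), again against the pre-bump vendored API; the Error type described by the trailing comment continues directly below:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// ToInt and ToFloat accept numeric types or numeric strings.
	i, _ := govalidator.ToInt("42")       // 42 (int64)
	f, _ := govalidator.ToFloat("3.14")   // 3.14 (float64)
	b, _ := govalidator.ToBoolean("true") // true

	// ToJSON marshals any value into a JSON string.
	j, _ := govalidator.ToJSON(map[string]int{"a": 1})

	fmt.Println(i, f, b, j) // 42 3.14 true {"a":1}
}
```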
-type Error struct { - Name string - Err error - CustomErrorMessageExists bool - - // Validator indicates the name of the validator that failed - Validator string - Path []string -} - -func (e Error) Error() string { - if e.CustomErrorMessageExists { - return e.Err.Error() - } - - errName := e.Name - if len(e.Path) > 0 { - errName = strings.Join(append(e.Path, e.Name), ".") - } - - return errName + ": " + e.Err.Error() -} diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go deleted file mode 100644 index 5041d9e8..00000000 --- a/vendor/github.com/asaskevich/govalidator/numerics.go +++ /dev/null @@ -1,100 +0,0 @@ -package govalidator - -import ( - "math" -) - -// Abs returns absolute value of number -func Abs(value float64) float64 { - return math.Abs(value) -} - -// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise -func Sign(value float64) float64 { - if value > 0 { - return 1 - } else if value < 0 { - return -1 - } else { - return 0 - } -} - -// IsNegative returns true if value < 0 -func IsNegative(value float64) bool { - return value < 0 -} - -// IsPositive returns true if value > 0 -func IsPositive(value float64) bool { - return value > 0 -} - -// IsNonNegative returns true if value >= 0 -func IsNonNegative(value float64) bool { - return value >= 0 -} - -// IsNonPositive returns true if value <= 0 -func IsNonPositive(value float64) bool { - return value <= 0 -} - -// InRangeInt returns true if value lies between left and right border -func InRangeInt(value, left, right interface{}) bool { - value64, _ := ToInt(value) - left64, _ := ToInt(left) - right64, _ := ToInt(right) - if left64 > right64 { - left64, right64 = right64, left64 - } - return value64 >= left64 && value64 <= right64 -} - -// InRangeFloat32 returns true if value lies between left and right border -func InRangeFloat32(value, left, right float32) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRangeFloat64 returns true if value lies between left and right border -func InRangeFloat64(value, left, right float64) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string. -// All types must the same type. 
-// False if value doesn't lie in range or if it incompatible or not comparable -func InRange(value interface{}, left interface{}, right interface{}) bool { - switch value.(type) { - case int: - intValue, _ := ToInt(value) - intLeft, _ := ToInt(left) - intRight, _ := ToInt(right) - return InRangeInt(intValue, intLeft, intRight) - case float32, float64: - intValue, _ := ToFloat(value) - intLeft, _ := ToFloat(left) - intRight, _ := ToFloat(right) - return InRangeFloat64(intValue, intLeft, intRight) - case string: - return value.(string) >= left.(string) && value.(string) <= right.(string) - default: - return false - } -} - -// IsWhole returns true if value is whole number -func IsWhole(value float64) bool { - return math.Remainder(value, 1) == 0 -} - -// IsNatural returns true if value is natural number (positive and whole) -func IsNatural(value float64) bool { - return IsWhole(value) && IsPositive(value) -} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go deleted file mode 100644 index bafc3765..00000000 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ /dev/null @@ -1,113 +0,0 @@ -package govalidator - -import "regexp" - -// Basic regular expressions for validating strings -const ( - Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" - ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" - ISBN13 string = "^(?:[0-9]{13})$" - UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - Alpha string = "^[a-zA-Z]+$" - Alphanumeric string = "^[a-zA-Z0-9]+$" - Numeric string = "^[0-9]+$" - Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" - Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" - Hexadecimal string = "^[0-9a-fA-F]+$" - Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - RGBcolor string = 
"^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" - ASCII string = "^[\x00-\x7F]+$" - Multibyte string = "[^\x00-\x7F]" - FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - PrintableASCII string = "^[\x20-\x7E]+$" - DataURI string = "^data:.+\\/(.+);base64$" - MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" - Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` - IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername string = `(\S+(:\S*)?@)` - URLPath string = `((\/|\?|#)[^\s]*)` - URLPort string = `(:(\d{1,5}))` - URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` - URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` - SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` - WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixPath string = `^(/[^/\x00]*)+/?$` - WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` - Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" - tagName string = "valid" - hasLowerCase string = ".*[[:lower:]]" - hasUpperCase string = ".*[[:upper:]]" - hasWhitespace string = ".*[[:space:]]" - hasWhitespaceOnly string = "^[[:space:]]+$" - IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" - IMSI string = "^\\d{14,15}$" - E164 string = `^\+?[1-9]\d{1,14}$` -) - -// Used by IsFilePath func -const ( - // Unknown is unresolved OS type - Unknown = iota - // Win is Windows type - Win - // Unix is *nix OS types - Unix -) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") - userDotRegexp = 
regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") - rxEmail = regexp.MustCompile(Email) - rxCreditCard = regexp.MustCompile(CreditCard) - rxISBN10 = regexp.MustCompile(ISBN10) - rxISBN13 = regexp.MustCompile(ISBN13) - rxUUID3 = regexp.MustCompile(UUID3) - rxUUID4 = regexp.MustCompile(UUID4) - rxUUID5 = regexp.MustCompile(UUID5) - rxUUID = regexp.MustCompile(UUID) - rxAlpha = regexp.MustCompile(Alpha) - rxAlphanumeric = regexp.MustCompile(Alphanumeric) - rxNumeric = regexp.MustCompile(Numeric) - rxInt = regexp.MustCompile(Int) - rxFloat = regexp.MustCompile(Float) - rxHexadecimal = regexp.MustCompile(Hexadecimal) - rxHexcolor = regexp.MustCompile(Hexcolor) - rxRGBcolor = regexp.MustCompile(RGBcolor) - rxASCII = regexp.MustCompile(ASCII) - rxPrintableASCII = regexp.MustCompile(PrintableASCII) - rxMultibyte = regexp.MustCompile(Multibyte) - rxFullWidth = regexp.MustCompile(FullWidth) - rxHalfWidth = regexp.MustCompile(HalfWidth) - rxBase64 = regexp.MustCompile(Base64) - rxDataURI = regexp.MustCompile(DataURI) - rxMagnetURI = regexp.MustCompile(MagnetURI) - rxLatitude = regexp.MustCompile(Latitude) - rxLongitude = regexp.MustCompile(Longitude) - rxDNSName = regexp.MustCompile(DNSName) - rxURL = regexp.MustCompile(URL) - rxSSN = regexp.MustCompile(SSN) - rxWinPath = regexp.MustCompile(WinPath) - rxUnixPath = regexp.MustCompile(UnixPath) - rxARWinPath = regexp.MustCompile(WinARPath) - rxARUnixPath = regexp.MustCompile(UnixARPath) - rxSemver = regexp.MustCompile(Semver) - rxHasLowerCase = regexp.MustCompile(hasLowerCase) - rxHasUpperCase = regexp.MustCompile(hasUpperCase) - rxHasWhitespace = regexp.MustCompile(hasWhitespace) - rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) - rxIMEI = regexp.MustCompile(IMEI) - rxIMSI = regexp.MustCompile(IMSI) - rxE164 = regexp.MustCompile(E164) -) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go deleted file mode 100644 index c573abb5..00000000 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ /dev/null @@ -1,656 +0,0 @@ -package govalidator - -import ( - "reflect" - "regexp" - "sort" - "sync" -) - -// Validator is a wrapper for a validator function that returns bool and accepts string. -type Validator func(str string) bool - -// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. -// The second parameter should be the context (in the case of validating a struct: the whole object being validated). -type CustomTypeValidator func(i interface{}, o interface{}) bool - -// ParamValidator is a wrapper for validator functions that accept additional parameters. -type ParamValidator func(str string, params ...string) bool - -// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value -type InterfaceParamValidator func(in interface{}, params ...string) bool -type tagOptionsMap map[string]tagOption - -func (t tagOptionsMap) orderedKeys() []string { - var keys []string - for k := range t { - keys = append(keys, k) - } - - sort.Slice(keys, func(a, b int) bool { - return t[keys[a]].order < t[keys[b]].order - }) - - return keys -} - -type tagOption struct { - name string - customErrorMessage string - order int -} - -// UnsupportedTypeError is a wrapper for reflect.Type -type UnsupportedTypeError struct { - Type reflect.Type -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. 
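A short illustration of the exported pattern constants compiled above: they can be reused directly with the standard regexp package when the Is* helpers do not fit. This is an editor sketch, not patch content; the types.go declarations continue below it.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Compile the exported patterns just as the package's own rx* variables do.
	rxSemver := regexp.MustCompile(govalidator.Semver)
	rxUUID4 := regexp.MustCompile(govalidator.UUID4)

	fmt.Println(rxSemver.MatchString("v1.2.3-beta.1")) // true
	fmt.Println(rxUUID4.MatchString("not-a-uuid"))     // false
}
```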
-type stringValues []reflect.Value - -// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value -var InterfaceParamTagMap = map[string]InterfaceParamValidator{ - "type": IsType, -} - -// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. -var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ - "type": regexp.MustCompile(`^type\((.*)\)$`), -} - -// ParamTagMap is a map of functions accept variants parameters -var ParamTagMap = map[string]ParamValidator{ - "length": ByteLength, - "range": Range, - "runelength": RuneLength, - "stringlength": StringLength, - "matches": StringMatches, - "in": IsInRaw, - "rsapub": IsRsaPub, - "minstringlength": MinStringLength, - "maxstringlength": MaxStringLength, -} - -// ParamTagRegexMap maps param tags to their respective regexes. -var ParamTagRegexMap = map[string]*regexp.Regexp{ - "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), - "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), - "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), - "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), - "in": regexp.MustCompile(`^in\((.*)\)`), - "matches": regexp.MustCompile(`^matches\((.+)\)$`), - "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), - "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), - "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), -} - -type customTypeTagMap struct { - validators map[string]CustomTypeValidator - - sync.RWMutex -} - -func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { - tm.RLock() - defer tm.RUnlock() - v, ok := tm.validators[name] - return v, ok -} - -func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { - tm.Lock() - defer tm.Unlock() - tm.validators[name] = ctv -} - -// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. -// Use this to validate compound or custom types that need to be handled as a whole, e.g. -// `type UUID [16]byte` (this would be handled as an array of bytes). -var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} - -// TagMap is a map of functions, that can be used as tags for ValidateStruct function. 
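A sketch of how these tag maps were extended in practice (the default TagMap contents follow right below). Registering a function under a new tag name makes it usable from `valid:` struct tags; the `ascii_id` tag here is a hypothetical example, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Register a custom string validator under a new (hypothetical) tag name.
	govalidator.TagMap["ascii_id"] = govalidator.Validator(func(str string) bool {
		return govalidator.IsASCII(str) && len(str) <= 8
	})

	type User struct {
		ID string `valid:"ascii_id"`
	}

	ok, err := govalidator.ValidateStruct(User{ID: "abc123"})
	fmt.Println(ok, err) // true <nil>
}
```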
-var TagMap = map[string]Validator{ - "email": IsEmail, - "url": IsURL, - "dialstring": IsDialString, - "requrl": IsRequestURL, - "requri": IsRequestURI, - "alpha": IsAlpha, - "utfletter": IsUTFLetter, - "alphanum": IsAlphanumeric, - "utfletternum": IsUTFLetterNumeric, - "numeric": IsNumeric, - "utfnumeric": IsUTFNumeric, - "utfdigit": IsUTFDigit, - "hexadecimal": IsHexadecimal, - "hexcolor": IsHexcolor, - "rgbcolor": IsRGBcolor, - "lowercase": IsLowerCase, - "uppercase": IsUpperCase, - "int": IsInt, - "float": IsFloat, - "null": IsNull, - "notnull": IsNotNull, - "uuid": IsUUID, - "uuidv3": IsUUIDv3, - "uuidv4": IsUUIDv4, - "uuidv5": IsUUIDv5, - "creditcard": IsCreditCard, - "isbn10": IsISBN10, - "isbn13": IsISBN13, - "json": IsJSON, - "multibyte": IsMultibyte, - "ascii": IsASCII, - "printableascii": IsPrintableASCII, - "fullwidth": IsFullWidth, - "halfwidth": IsHalfWidth, - "variablewidth": IsVariableWidth, - "base64": IsBase64, - "datauri": IsDataURI, - "ip": IsIP, - "port": IsPort, - "ipv4": IsIPv4, - "ipv6": IsIPv6, - "dns": IsDNSName, - "host": IsHost, - "mac": IsMAC, - "latitude": IsLatitude, - "longitude": IsLongitude, - "ssn": IsSSN, - "semver": IsSemver, - "rfc3339": IsRFC3339, - "rfc3339WithoutZone": IsRFC3339WithoutZone, - "ISO3166Alpha2": IsISO3166Alpha2, - "ISO3166Alpha3": IsISO3166Alpha3, - "ISO4217": IsISO4217, - "IMEI": IsIMEI, - "ulid": IsULID, -} - -// ISO3166Entry stores country codes -type ISO3166Entry struct { - EnglishShortName string - FrenchShortName string - Alpha2Code string - Alpha3Code string - Numeric string -} - -//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" -var ISO3166List = []ISO3166Entry{ - {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, - {"Albania", "Albanie (l')", "AL", "ALB", "008"}, - {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, - {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, - {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, - {"Andorra", "Andorre (l')", "AD", "AND", "020"}, - {"Angola", "Angola (l')", "AO", "AGO", "024"}, - {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, - {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, - {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, - {"Australia", "Australie (l')", "AU", "AUS", "036"}, - {"Austria", "Autriche (l')", "AT", "AUT", "040"}, - {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, - {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, - {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, - {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, - {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, - {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, - {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, - {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, - {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, - {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, - {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, - {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, - {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, - {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, - {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, - {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, - {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, - {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, - {"Bulgaria", "Bulgarie (la)", "BG", 
"BGR", "100"}, - {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, - {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, - {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, - {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, - {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, - {"Canada", "Canada (le)", "CA", "CAN", "124"}, - {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, - {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, - {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, - {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, - {"Chad", "Tchad (le)", "TD", "TCD", "148"}, - {"Chile", "Chili (le)", "CL", "CHL", "152"}, - {"China", "Chine (la)", "CN", "CHN", "156"}, - {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, - {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, - {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, - {"Colombia", "Colombie (la)", "CO", "COL", "170"}, - {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, - {"Mayotte", "Mayotte", "YT", "MYT", "175"}, - {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, - {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, - {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, - {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, - {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, - {"Cuba", "Cuba", "CU", "CUB", "192"}, - {"Cyprus", "Chypre", "CY", "CYP", "196"}, - {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, - {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, - {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, - {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, - {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, - {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, - {"El Salvador", "El Salvador", "SV", "SLV", "222"}, - {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, - {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, - {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, - {"Estonia", "Estonie (l')", "EE", "EST", "233"}, - {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, - {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, - {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, - {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, - {"Finland", "Finlande (la)", "FI", "FIN", "246"}, - {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, - {"France", "France (la)", "FR", "FRA", "250"}, - {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, - {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, - {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, - {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, - {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, - {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, - {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, - {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, - {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, - {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, - {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, - {"Kiribati", "Kiribati", "KI", "KIR", "296"}, - {"Greece", "Grèce (la)", "GR", "GRC", "300"}, - {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, - {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, 
- {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, - {"Guam", "Guam", "GU", "GUM", "316"}, - {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, - {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, - {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, - {"Haiti", "Haïti", "HT", "HTI", "332"}, - {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, - {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, - {"Honduras", "Honduras (le)", "HN", "HND", "340"}, - {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, - {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, - {"Iceland", "Islande (l')", "IS", "ISL", "352"}, - {"India", "Inde (l')", "IN", "IND", "356"}, - {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, - {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, - {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, - {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, - {"Israel", "Israël", "IL", "ISR", "376"}, - {"Italy", "Italie (l')", "IT", "ITA", "380"}, - {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, - {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, - {"Japan", "Japon (le)", "JP", "JPN", "392"}, - {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, - {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, - {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, - {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, - {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, - {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, - {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, - {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, - {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, - {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, - {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, - {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, - {"Libya", "Libye (la)", "LY", "LBY", "434"}, - {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, - {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, - {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, - {"Macao", "Macao", "MO", "MAC", "446"}, - {"Madagascar", "Madagascar", "MG", "MDG", "450"}, - {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, - {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, - {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, - {"Mali", "Mali (le)", "ML", "MLI", "466"}, - {"Malta", "Malte", "MT", "MLT", "470"}, - {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, - {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, - {"Mauritius", "Maurice", "MU", "MUS", "480"}, - {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, - {"Monaco", "Monaco", "MC", "MCO", "492"}, - {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, - {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, - {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, - {"Montserrat", "Montserrat", "MS", "MSR", "500"}, - {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, - {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, - {"Oman", "Oman", "OM", "OMN", "512"}, - {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, - {"Nauru", "Nauru", "NR", "NRU", "520"}, - {"Nepal", "Népal (le)", "NP", "NPL", "524"}, - {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, - {"Curaçao", "Curaçao", "CW", "CUW", "531"}, - {"Aruba", "Aruba", "AW", "ABW", "533"}, - {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, - 
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, - {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, - {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, - {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, - {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, - {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, - {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, - {"Niue", "Niue", "NU", "NIU", "570"}, - {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, - {"Norway", "Norvège (la)", "NO", "NOR", "578"}, - {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, - {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, - {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, - {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, - {"Palau", "Palaos (les)", "PW", "PLW", "585"}, - {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, - {"Panama", "Panama (le)", "PA", "PAN", "591"}, - {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, - {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, - {"Peru", "Pérou (le)", "PE", "PER", "604"}, - {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, - {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, - {"Poland", "Pologne (la)", "PL", "POL", "616"}, - {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, - {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, - {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, - {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, - {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, - {"Réunion", "Réunion (La)", "RE", "REU", "638"}, - {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, - {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, - {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, - {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, - {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, - {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, - {"Anguilla", "Anguilla", "AI", "AIA", "660"}, - {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, - {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, - {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, - {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, - {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, - {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, - {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, - {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, - {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, - {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, - {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, - {"Singapore", "Singapour", "SG", "SGP", "702"}, - {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, - {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, - {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, - {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, - {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, - {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, - {"Spain", "Espagne (l')", "ES", "ESP", "724"}, - {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, - {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, - {"Western Sahara*", 
"Sahara occidental (le)*", "EH", "ESH", "732"}, - {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, - {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, - {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, - {"Sweden", "Suède (la)", "SE", "SWE", "752"}, - {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, - {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, - {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, - {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, - {"Togo", "Togo (le)", "TG", "TGO", "768"}, - {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, - {"Tonga", "Tonga (les)", "TO", "TON", "776"}, - {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, - {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, - {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, - {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, - {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, - {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, - {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, - {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, - {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, - {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, - {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, - {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, - {"Guernsey", "Guernesey", "GG", "GGY", "831"}, - {"Jersey", "Jersey", "JE", "JEY", "832"}, - {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, - {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, - {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, - {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, - {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, - {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, - {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, - {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, - {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, - {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, - {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, - {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, -} - -// ISO4217List is the list of ISO currency codes -var ISO4217List = []string{ - "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", - "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", - "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", - "DJF", "DKK", "DOP", "DZD", - "EGP", "ERN", "ETB", "EUR", - "FJD", "FKP", - "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", - "HKD", "HNL", "HRK", "HTG", "HUF", - "IDR", "ILS", "INR", "IQD", "IRR", "ISK", - "JMD", "JOD", "JPY", - "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", - "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", - "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", - "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", - "OMR", - "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", - "QAR", - "RON", "RSD", "RUB", "RWF", - "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", - "THB", "TJS", "TMT", 
"TND", "TOP", "TRY", "TTD", "TWD", "TZS", - "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", - "VEF", "VES", "VND", "VUV", - "WST", - "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", - "YER", - "ZAR", "ZMW", "ZWL", -} - -// ISO693Entry stores ISO language codes -type ISO693Entry struct { - Alpha3bCode string - Alpha2Code string - English string -} - -//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json -var ISO693List = []ISO693Entry{ - {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, - {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, - {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, - {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, - {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, - {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, - {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, - {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, - {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, - {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, - {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, - {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, - {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, - {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, - {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, - {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, - {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, - {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, - {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, - {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, - {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, - {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, - {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, - {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, - {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, - {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, - {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, - {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, - {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, - {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, - {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, - {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, - {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, - {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, - {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, - {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, - {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, - {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, - {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, - {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, - {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, - {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, - {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, - {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, - {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, - {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, - {Alpha3bCode: "fre", 
Alpha2Code: "fr", English: "French"}, - {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, - {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, - {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, - {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, - {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, - {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, - {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, - {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, - {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, - {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, - {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, - {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, - {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, - {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, - {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, - {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, - {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, - {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, - {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, - {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, - {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, - {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, - {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, - {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, - {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, - {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, - {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, - {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, - {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, - {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, - {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, - {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, - {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, - {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, - {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, - {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, - {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, - {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, - {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, - {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, - {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, - {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, - {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, - {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, - {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, - {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, - {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, - {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, - {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, - {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, - {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, - {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, - {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, - 
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, - {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, - {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, - {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, - {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, - {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, - {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, - {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, - {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, - {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, - {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, - {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, - {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, - {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, - {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, - {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, - {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, - {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, - {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, - {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, - {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, - {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, - {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, - {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, - {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, - {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, - {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, - {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, - {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, - {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, - {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, - {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, - {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, - {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, - {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, - {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, - {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, - {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, - {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, - {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, - {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, - {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, - {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, - {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, - {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, - {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, - {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, - {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, - {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, - {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, - {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, - {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, - {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, - {Alpha3bCode: "swe", Alpha2Code: "sv", 
English: "Swedish"}, - {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, - {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, - {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, - {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, - {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, - {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, - {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, - {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, - {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, - {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, - {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, - {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, - {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, - {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, - {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, - {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, - {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, - {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, - {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, - {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, - {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, - {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, - {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, - {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, - {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, - {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, - {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, - {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, - {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, - {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, -} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go deleted file mode 100644 index f4c30f82..00000000 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ /dev/null @@ -1,270 +0,0 @@ -package govalidator - -import ( - "errors" - "fmt" - "html" - "math" - "path" - "regexp" - "strings" - "unicode" - "unicode/utf8" -) - -// Contains checks if the string contains the substring. -func Contains(str, substring string) bool { - return strings.Contains(str, substring) -} - -// Matches checks if string matches the pattern (pattern is regular expression) -// In case of error return false -func Matches(str, pattern string) bool { - match, _ := regexp.MatchString(pattern, str) - return match -} - -// LeftTrim trims characters from the left side of the input. -// If second argument is empty, it will remove leading spaces. -func LeftTrim(str, chars string) string { - if chars == "" { - return strings.TrimLeftFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("^[" + chars + "]+") - return r.ReplaceAllString(str, "") -} - -// RightTrim trims characters from the right side of the input. -// If second argument is empty, it will remove trailing spaces. -func RightTrim(str, chars string) string { - if chars == "" { - return strings.TrimRightFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("[" + chars + "]+$") - return r.ReplaceAllString(str, "") -} - -// Trim trims characters from both sides of the input. -// If second argument is empty, it will remove spaces. 
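For reference, a minimal sketch of the string helpers above (Contains, Matches, and the trim functions), assuming the pre-bump vendored API; the Trim implementation the trailing comment describes continues right below:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.Contains("hello world", "world")) // true
	fmt.Println(govalidator.Matches("abc123", `^[a-z]+\d+$`)) // true

	// With an empty chars argument the trim helpers strip whitespace.
	fmt.Println(govalidator.LeftTrim("  padded", ""))    // "padded"
	fmt.Println(govalidator.RightTrim("padded\t\n", "")) // "padded"
}
```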
-func Trim(str, chars string) string { - return LeftTrim(RightTrim(str, chars), chars) -} - -// WhiteList removes characters that do not appear in the whitelist. -func WhiteList(str, chars string) string { - pattern := "[^" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// BlackList removes characters that appear in the blacklist. -func BlackList(str, chars string) string { - pattern := "[" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. -// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). -func StripLow(str string, keepNewLines bool) string { - chars := "" - if keepNewLines { - chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" - } else { - chars = "\x00-\x1F\x7F" - } - return BlackList(str, chars) -} - -// ReplacePattern replaces regular expression pattern in string -func ReplacePattern(str, pattern, replace string) string { - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, replace) -} - -// Escape replaces <, >, & and " with HTML entities. -var Escape = html.EscapeString - -func addSegment(inrune, segment []rune) []rune { - if len(segment) == 0 { - return inrune - } - if len(inrune) != 0 { - inrune = append(inrune, '_') - } - inrune = append(inrune, segment...) - return inrune -} - -// UnderscoreToCamelCase converts from underscore separated form to camel case form. -// Ex.: my_func => MyFunc -func UnderscoreToCamelCase(s string) string { - return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) -} - -// CamelCaseToUnderscore converts from camel case form to underscore separated form. -// Ex.: MyFunc => my_func -func CamelCaseToUnderscore(str string) string { - var output []rune - var segment []rune - for _, r := range str { - - // not treat number as separate segment - if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { - output = addSegment(output, segment) - segment = nil - } - segment = append(segment, unicode.ToLower(r)) - } - output = addSegment(output, segment) - return string(output) -} - -// Reverse returns reversed string -func Reverse(s string) string { - r := []rune(s) - for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r) -} - -// GetLines splits string by "\n" and return array of lines -func GetLines(s string) []string { - return strings.Split(s, "\n") -} - -// GetLine returns specified line of multiline string -func GetLine(s string, index int) (string, error) { - lines := GetLines(s) - if index < 0 || index >= len(lines) { - return "", errors.New("line index out of bounds") - } - return lines[index], nil -} - -// RemoveTags removes all tags from HTML string -func RemoveTags(s string) string { - return ReplacePattern(s, "<[^>]*>", "") -} - -// SafeFileName returns safe string that can be used in file names -func SafeFileName(str string) string { - name := strings.ToLower(str) - name = path.Clean(path.Base(name)) - name = strings.Trim(name, " ") - separators, err := regexp.Compile(`[ &_=+:]`) - if err == nil { - name = separators.ReplaceAllString(name, "-") - } - legal, err := regexp.Compile(`[^[:alnum:]-.]`) - if err == nil { - name = legal.ReplaceAllString(name, "") - } - for strings.Contains(name, "--") { - name = strings.Replace(name, "--", "-", -1) - } - return name -} - -// NormalizeEmail canonicalize an email address. 
-// The local part of the email address is lowercased for all domains; the hostname is always lowercased and -// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). -// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and -// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are -// normalized to @gmail.com. -func NormalizeEmail(str string) (string, error) { - if !IsEmail(str) { - return "", fmt.Errorf("%s is not an email", str) - } - parts := strings.Split(str, "@") - parts[0] = strings.ToLower(parts[0]) - parts[1] = strings.ToLower(parts[1]) - if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { - parts[1] = "gmail.com" - parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] - } - return strings.Join(parts, "@"), nil -} - -// Truncate a string to the closest length without breaking words. -func Truncate(str string, length int, ending string) string { - var aftstr, befstr string - if len(str) > length { - words := strings.Fields(str) - before, present := 0, 0 - for i := range words { - befstr = aftstr - before = present - aftstr = aftstr + words[i] + " " - present = len(aftstr) - if present > length && i != 0 { - if (length - before) < (present - length) { - return Trim(befstr, " /\\.,\"'#!?&@+-") + ending - } - return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending - } - } - } - - return str -} - -// PadLeft pads left side of a string if size of string is less then indicated pad length -func PadLeft(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, false) -} - -// PadRight pads right side of a string if size of string is less then indicated pad length -func PadRight(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, false, true) -} - -// PadBoth pads both sides of a string if size of string is less then indicated pad length -func PadBoth(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, true) -} - -// PadString either left, right or both sides. -// Note that padding string can be unicode and more then one character -func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { - - // When padded length is less then the current string size - if padLen < utf8.RuneCountInString(str) { - return str - } - - padLen -= utf8.RuneCountInString(str) - - targetLen := padLen - - targetLenLeft := targetLen - targetLenRight := targetLen - if padLeft && padRight { - targetLenLeft = padLen / 2 - targetLenRight = padLen - targetLenLeft - } - - strToRepeatLen := utf8.RuneCountInString(padStr) - - repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) - repeatedString := strings.Repeat(padStr, repeatTimes) - - leftSide := "" - if padLeft { - leftSide = repeatedString[0:targetLenLeft] - } - - rightSide := "" - if padRight { - rightSide = repeatedString[0:targetLenRight] - } - - return leftSide + str + rightSide -} - -// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object -func TruncatingErrorf(str string, args ...interface{}) error { - n := strings.Count(str, "%s") - return fmt.Errorf(str, args[:n]...) 
-} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go deleted file mode 100644 index c9c4fac0..00000000 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ /dev/null @@ -1,1768 +0,0 @@ -// Package govalidator is package of validators and sanitizers for strings, structs and collections. -package govalidator - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net" - "net/url" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -var ( - fieldsRequiredByDefault bool - nilPtrAllowedByRequired = false - notNumberRegexp = regexp.MustCompile("[^0-9]+") - whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) - paramsRegexp = regexp.MustCompile(`\(.*\)$`) -) - -const maxURLRuneCount = 2083 -const minURLRuneCount = 3 -const rfc3339WithoutZone = "2006-01-02T15:04:05" - -// SetFieldsRequiredByDefault causes validation to fail when struct fields -// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). -// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -// type exampleStruct struct { -// Name string `` -// Email string `valid:"email"` -// This, however, will only fail when Email is empty or an invalid email address: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email"` -// Lastly, this will only fail when Email is an invalid email address but not when it's empty: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email,optional"` -func SetFieldsRequiredByDefault(value bool) { - fieldsRequiredByDefault = value -} - -// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. -// The validation will still reject ptr fields in their zero value state. Example with this enabled: -// type exampleStruct struct { -// Name *string `valid:"required"` -// With `Name` set to "", this will be considered invalid input and will cause a validation error. -// With `Name` set to nil, this will be considered valid by validation. -// By default this is disabled. -func SetNilPtrAllowedByRequired(value bool) { - nilPtrAllowedByRequired = value -} - -// IsEmail checks if the string is an email. -func IsEmail(str string) bool { - // TODO uppercase letters are not supported - return rxEmail.MatchString(str) -} - -// IsExistingEmail checks if the string is an email of existing domain -func IsExistingEmail(email string) bool { - - if len(email) < 6 || len(email) > 254 { - return false - } - at := strings.LastIndex(email, "@") - if at <= 0 || at > len(email)-3 { - return false - } - user := email[:at] - host := email[at+1:] - if len(user) > 64 { - return false - } - switch host { - case "localhost", "example.com": - return true - } - if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { - return false - } - if _, err := net.LookupMX(host); err != nil { - if _, err := net.LookupIP(host); err != nil { - return false - } - } - - return true -} - -// IsURL checks if the string is an URL. 
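A minimal sketch of how the required-by-default switch documented above changes ValidateStruct results; the struct and values here are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type example struct {
	Name  string ``             // no validator, and not marked `valid:"-"` or optional
	Email string `valid:"email"`
}

func main() {
	govalidator.SetFieldsRequiredByDefault(true)
	// With the switch on, Name now fails validation regardless of its value.
	ok, err := govalidator.ValidateStruct(example{Email: "some.one@example.com"})
	fmt.Println(ok, err) // false, plus an error naming the unvalidated field
}
```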
-func IsURL(str string) bool {
-	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
-		return false
-	}
-	strTemp := str
-	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
-		// support URLs with no explicit scheme but with a colon for the port number;
-		// http:// is prepended so url.Parse will succeed, and strTemp is used so it does not affect rxURL.MatchString
-		strTemp = "http://" + str
-	}
-	u, err := url.Parse(strTemp)
-	if err != nil {
-		return false
-	}
-	if strings.HasPrefix(u.Host, ".") {
-		return false
-	}
-	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
-		return false
-	}
-	return rxURL.MatchString(str)
-}
-
-// IsRequestURL checks if the string rawurl, assuming
-// it was received in an HTTP request, is a valid
-// URL conforming to RFC 3986.
-func IsRequestURL(rawurl string) bool {
-	url, err := url.ParseRequestURI(rawurl)
-	if err != nil {
-		return false // couldn't even parse the rawurl
-	}
-	if len(url.Scheme) == 0 {
-		return false // no scheme found
-	}
-	return true
-}
-
-// IsRequestURI checks if the string rawurl, assuming
-// it was received in an HTTP request, is an
-// absolute URI or an absolute path.
-func IsRequestURI(rawurl string) bool {
-	_, err := url.ParseRequestURI(rawurl)
-	return err == nil
-}
-
-// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
-func IsAlpha(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxAlpha.MatchString(str)
-}
-
-// IsUTFLetter checks if the string contains only unicode letter characters.
-// Similar to IsAlpha but for all languages. Empty string is valid.
-func IsUTFLetter(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-
-	for _, c := range str {
-		if !unicode.IsLetter(c) {
-			return false
-		}
-	}
-	return true
-
-}
-
-// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
-func IsAlphanumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxAlphanumeric.MatchString(str)
-}
-
-// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
-func IsUTFLetterNumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	for _, c := range str {
-		if !unicode.IsLetter(c) && !unicode.IsNumber(c) { // letters and numbers are ok
-			return false
-		}
-	}
-	return true
-
-}
-
-// IsNumeric checks if the string contains only numbers. Empty string is valid.
-func IsNumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxNumeric.MatchString(str)
-}
-
-// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
-// Numbers can be 0-9, but also fractions (¾), Roman numerals (Ⅸ) and Hangzhou numerals (〩). Empty string is valid.
-func IsUTFNumeric(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	if strings.IndexAny(str, "+-") > 0 {
-		return false
-	}
-	if len(str) > 1 {
-		str = strings.TrimPrefix(str, "-")
-		str = strings.TrimPrefix(str, "+")
-	}
-	for _, c := range str {
-		if !unicode.IsNumber(c) { // only number runes are allowed; a leading sign was stripped above
-			return false
-		}
-	}
-	return true
-
-}
-
-// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
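The scheme handling in IsURL above is easiest to see next to IsRequestURL; a small sketch with illustrative inputs, assuming the usual rxURL behavior:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsURL("example.com/path"))          // true: the scheme may be omitted
	fmt.Println(govalidator.IsRequestURL("example.com/path"))   // false: ParseRequestURI needs a scheme
	fmt.Println(govalidator.IsRequestURL("http://example.com")) // true
}
```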
-func IsUTFDigit(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	if strings.IndexAny(str, "+-") > 0 {
-		return false
-	}
-	if len(str) > 1 {
-		str = strings.TrimPrefix(str, "-")
-		str = strings.TrimPrefix(str, "+")
-	}
-	for _, c := range str {
-		if !unicode.IsDigit(c) { // only digit runes are allowed; a leading sign was stripped above
-			return false
-		}
-	}
-	return true
-
-}
-
-// IsHexadecimal checks if the string is a hexadecimal number.
-func IsHexadecimal(str string) bool {
-	return rxHexadecimal.MatchString(str)
-}
-
-// IsHexcolor checks if the string is a hexadecimal color.
-func IsHexcolor(str string) bool {
-	return rxHexcolor.MatchString(str)
-}
-
-// IsRGBcolor checks if the string is a valid RGB color in the form rgb(RRR, GGG, BBB).
-func IsRGBcolor(str string) bool {
-	return rxRGBcolor.MatchString(str)
-}
-
-// IsLowerCase checks if the string is lowercase. Empty string is valid.
-func IsLowerCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return str == strings.ToLower(str)
-}
-
-// IsUpperCase checks if the string is uppercase. Empty string is valid.
-func IsUpperCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return str == strings.ToUpper(str)
-}
-
-// HasLowerCase checks if the string contains at least 1 lowercase character. Empty string is valid.
-func HasLowerCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHasLowerCase.MatchString(str)
-}
-
-// HasUpperCase checks if the string contains at least 1 uppercase character. Empty string is valid.
-func HasUpperCase(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHasUpperCase.MatchString(str)
-}
-
-// IsInt checks if the string is an integer. Empty string is valid.
-func IsInt(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxInt.MatchString(str)
-}
-
-// IsFloat checks if the string is a float.
-func IsFloat(str string) bool {
-	return str != "" && rxFloat.MatchString(str)
-}
-
-// IsDivisibleBy checks if the string is a number that is divisible by another.
-// It returns false if the second argument is not a valid integer or is zero.
-// Otherwise it returns true when the first argument is zero or not a valid integer (an invalid string converts to zero).
-func IsDivisibleBy(str, num string) bool {
-	f, _ := ToFloat(str)
-	p := int64(f)
-	q, _ := ToInt(num)
-	if q == 0 {
-		return false
-	}
-	return (p == 0) || (p%q == 0)
-}
-
-// IsNull checks if the string is empty (has zero length).
-func IsNull(str string) bool {
-	return len(str) == 0
-}
-
-// IsNotNull checks if the string is not empty.
-func IsNotNull(str string) bool {
-	return !IsNull(str)
-}
-
-// HasWhitespaceOnly checks if the string contains only whitespace.
-func HasWhitespaceOnly(str string) bool {
-	return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
-}
-
-// HasWhitespace checks if the string contains any whitespace.
-func HasWhitespace(str string) bool {
-	return len(str) > 0 && rxHasWhitespace.MatchString(str)
-}
-
-// IsByteLength checks if the string's length (in bytes) falls in a range.
-func IsByteLength(str string, min, max int) bool {
-	return len(str) >= min && len(str) <= max
-}
-
-// IsUUIDv3 checks if the string is a UUID version 3.
-func IsUUIDv3(str string) bool {
-	return rxUUID3.MatchString(str)
-}
-
-// IsUUIDv4 checks if the string is a UUID version 4.
-func IsUUIDv4(str string) bool {
-	return rxUUID4.MatchString(str)
-}
-
-// IsUUIDv5 checks if the string is a UUID version 5.
-func IsUUIDv5(str string) bool {
-	return rxUUID5.MatchString(str)
-}
-
-// IsUUID checks if the string is a UUID (version 3, 4 or 5).
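To pin down the IsDivisibleBy contract above, a few hypothetical calls and the results the implementation gives:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsDivisibleBy("123", "3")) // true: 123 = 3 × 41
	fmt.Println(govalidator.IsDivisibleBy("123", "0")) // false: a zero divisor always fails
	fmt.Println(govalidator.IsDivisibleBy("abc", "1")) // true: the invalid string converts to zero
}
```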
-func IsUUID(str string) bool { - return rxUUID.MatchString(str) -} - -// Byte to index table for O(1) lookups when unmarshaling. -// We use 0xFF as sentinel value for invalid indexes. -var ulidDec = [...]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, - 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, - 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, -} - -// EncodedSize is the length of a text encoded ULID. -const ulidEncodedSize = 26 - -// IsULID checks if the string is a ULID. -// -// Implementation got from: -// https://github.com/oklog/ulid (Apache-2.0 License) -// -func IsULID(str string) bool { - // Check if a base32 encoded ULID is the right length. - if len(str) != ulidEncodedSize { - return false - } - - // Check if all the characters in a base32 encoded ULID are part of the - // expected base32 character set. - if ulidDec[str[0]] == 0xFF || - ulidDec[str[1]] == 0xFF || - ulidDec[str[2]] == 0xFF || - ulidDec[str[3]] == 0xFF || - ulidDec[str[4]] == 0xFF || - ulidDec[str[5]] == 0xFF || - ulidDec[str[6]] == 0xFF || - ulidDec[str[7]] == 0xFF || - ulidDec[str[8]] == 0xFF || - ulidDec[str[9]] == 0xFF || - ulidDec[str[10]] == 0xFF || - ulidDec[str[11]] == 0xFF || - ulidDec[str[12]] == 0xFF || - ulidDec[str[13]] == 0xFF || - ulidDec[str[14]] == 0xFF || - ulidDec[str[15]] == 0xFF || - ulidDec[str[16]] == 0xFF || - ulidDec[str[17]] == 0xFF || - ulidDec[str[18]] == 0xFF || - ulidDec[str[19]] == 0xFF || - ulidDec[str[20]] == 0xFF || - ulidDec[str[21]] == 0xFF || - ulidDec[str[22]] == 0xFF || - ulidDec[str[23]] == 0xFF || - ulidDec[str[24]] == 0xFF || - ulidDec[str[25]] == 0xFF { - return false - } - - // Check if the first character in a base32 encoded ULID will overflow. This - // happens because the base32 representation encodes 130 bits, while the - // ULID is only 128 bits. - // - // See https://github.com/oklog/ulid/issues/9 for details. - if str[0] > '7' { - return false - } - return true -} - -// IsCreditCard checks if the string is a credit card. 
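The final overflow guard in IsULID above is the subtle part: 26 base32 characters carry 26 × 5 = 130 bits, two more than the 128-bit ULID, so the leading character may only use 3 of its 5 bits, i.e. '0' through '7'. A hypothetical check:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Sample ULID taken from the oklog/ulid documentation.
	fmt.Println(govalidator.IsULID("01AN4Z07BY79KA1307SR9X4MV3")) // true
	// Same string with a leading '8': the 130-bit text form would overflow 128 bits.
	fmt.Println(govalidator.IsULID("81AN4Z07BY79KA1307SR9X4MV3")) // false
}
```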
-func IsCreditCard(str string) bool {
-	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
-	if !rxCreditCard.MatchString(sanitized) {
-		return false
-	}
-
-	number, _ := ToInt(sanitized)
-	number, lastDigit := number/10, number%10
-
-	var sum int64
-	for i := 0; number > 0; i++ {
-		digit := number % 10
-
-		if i%2 == 0 {
-			digit *= 2
-			if digit > 9 {
-				digit -= 9
-			}
-		}
-
-		sum += digit
-		number = number / 10
-	}
-
-	return (sum+lastDigit)%10 == 0
-}
-
-// IsISBN10 checks if the string is an ISBN version 10.
-func IsISBN10(str string) bool {
-	return IsISBN(str, 10)
-}
-
-// IsISBN13 checks if the string is an ISBN version 13.
-func IsISBN13(str string) bool {
-	return IsISBN(str, 13)
-}
-
-// IsISBN checks if the string is an ISBN (version 10 or 13).
-// If the version value is not equal to 10 or 13, both variants are checked.
-func IsISBN(str string, version int) bool {
-	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
-	var checksum int32
-	var i int32
-	if version == 10 {
-		if !rxISBN10.MatchString(sanitized) {
-			return false
-		}
-		for i = 0; i < 9; i++ {
-			checksum += (i + 1) * int32(sanitized[i]-'0')
-		}
-		if sanitized[9] == 'X' {
-			checksum += 10 * 10
-		} else {
-			checksum += 10 * int32(sanitized[9]-'0')
-		}
-		if checksum%11 == 0 {
-			return true
-		}
-		return false
-	} else if version == 13 {
-		if !rxISBN13.MatchString(sanitized) {
-			return false
-		}
-		factor := []int32{1, 3}
-		for i = 0; i < 12; i++ {
-			checksum += factor[i%2] * int32(sanitized[i]-'0')
-		}
-		return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
-	}
-	return IsISBN(str, 10) || IsISBN(str, 13)
-}
-
-// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
-func IsJSON(str string) bool {
-	var js json.RawMessage
-	return json.Unmarshal([]byte(str), &js) == nil
-}
-
-// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
-func IsMultibyte(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxMultibyte.MatchString(str)
-}
-
-// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
-func IsASCII(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxASCII.MatchString(str)
-}
-
-// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
-func IsPrintableASCII(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxPrintableASCII.MatchString(str)
-}
-
-// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
-func IsFullWidth(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxFullWidth.MatchString(str)
-}
-
-// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
-func IsHalfWidth(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHalfWidth.MatchString(str)
-}
-
-// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
-func IsVariableWidth(str string) bool {
-	if IsNull(str) {
-		return true
-	}
-	return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
-}
-
-// IsBase64 checks if a string is base64 encoded.
-func IsBase64(str string) bool {
-	return rxBase64.MatchString(str)
-}
-
-// IsFilePath checks if a string is a Windows or Unix file path and returns its type.
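A worked example for the Luhn loop in IsCreditCard above, using the well-known test number 4111 1111 1111 1111: the final 1 is held back as the check digit, every second remaining digit is doubled (with 9 subtracted when the double exceeds 9), the digits sum to 29, and 29 + 1 = 30 is divisible by 10, so the number validates:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1111")) // true: Luhn total 30
	fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1112")) // false: checksum off by one
}
```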
-func IsFilePath(str string) (bool, int) {
-	if rxWinPath.MatchString(str) {
-		// check the Windows path limit; see:
-		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
-		if len(str[3:]) > 32767 {
-			return false, Win
-		}
-		return true, Win
-	} else if rxUnixPath.MatchString(str) {
-		return true, Unix
-	}
-	return false, Unknown
-}
-
-// IsWinFilePath checks both relative & absolute paths in Windows.
-func IsWinFilePath(str string) bool {
-	if rxARWinPath.MatchString(str) {
-		// check the Windows path limit; see:
-		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
-		if len(str[3:]) > 32767 {
-			return false
-		}
-		return true
-	}
-	return false
-}
-
-// IsUnixFilePath checks both relative & absolute paths in Unix.
-func IsUnixFilePath(str string) bool {
-	if rxARUnixPath.MatchString(str) {
-		return true
-	}
-	return false
-}
-
-// IsDataURI checks if a string is a base64-encoded data URI, such as an image.
-func IsDataURI(str string) bool {
-	dataURI := strings.Split(str, ",")
-	if !rxDataURI.MatchString(dataURI[0]) {
-		return false
-	}
-	return IsBase64(dataURI[1])
-}
-
-// IsMagnetURI checks if a string is a valid magnet URI.
-func IsMagnetURI(str string) bool {
-	return rxMagnetURI.MatchString(str)
-}
-
-// IsISO3166Alpha2 checks if a string is a valid two-letter country code.
-func IsISO3166Alpha2(str string) bool {
-	for _, entry := range ISO3166List {
-		if str == entry.Alpha2Code {
-			return true
-		}
-	}
-	return false
-}
-
-// IsISO3166Alpha3 checks if a string is a valid three-letter country code.
-func IsISO3166Alpha3(str string) bool {
-	for _, entry := range ISO3166List {
-		if str == entry.Alpha3Code {
-			return true
-		}
-	}
-	return false
-}
-
-// IsISO693Alpha2 checks if a string is a valid two-letter language code.
-func IsISO693Alpha2(str string) bool {
-	for _, entry := range ISO693List {
-		if str == entry.Alpha2Code {
-			return true
-		}
-	}
-	return false
-}
-
-// IsISO693Alpha3b checks if a string is a valid three-letter language code.
-func IsISO693Alpha3b(str string) bool {
-	for _, entry := range ISO693List {
-		if str == entry.Alpha3bCode {
-			return true
-		}
-	}
-	return false
-}
-
-// IsDNSName will validate the given string as a DNS name.
-func IsDNSName(str string) bool {
-	if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
-		// constraints already violated
-		return false
-	}
-	return !IsIP(str) && rxDNSName.MatchString(str)
-}
-
-// IsHash checks if a string is a hash of the given algorithm.
-// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
-func IsHash(str string, algorithm string) bool {
-	var len string
-	algo := strings.ToLower(algorithm)
-
-	if algo == "crc32" || algo == "crc32b" {
-		len = "8"
-	} else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
-		len = "32"
-	} else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
-		len = "40"
-	} else if algo == "tiger192" {
-		len = "48"
-	} else if algo == "sha3-224" {
-		len = "56"
-	} else if algo == "sha256" || algo == "sha3-256" {
-		len = "64"
-	} else if algo == "sha384" || algo == "sha3-384" {
-		len = "96"
-	} else if algo == "sha512" || algo == "sha3-512" {
-		len = "128"
-	} else {
-		return false
-	}
-
-	return Matches(str, "^[a-f0-9]{"+len+"}$")
-}
-
-// IsSHA3224 checks if a string is a SHA3-224 hash.
Alias for `IsHash(str, "sha3-224")`.
-func IsSHA3224(str string) bool {
-	return IsHash(str, "sha3-224")
-}
-
-// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`.
-func IsSHA3256(str string) bool {
-	return IsHash(str, "sha3-256")
-}
-
-// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`.
-func IsSHA3384(str string) bool {
-	return IsHash(str, "sha3-384")
-}
-
-// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`.
-func IsSHA3512(str string) bool {
-	return IsHash(str, "sha3-512")
-}
-
-// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`.
-func IsSHA512(str string) bool {
-	return IsHash(str, "sha512")
-}
-
-// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")`.
-func IsSHA384(str string) bool {
-	return IsHash(str, "sha384")
-}
-
-// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")`.
-func IsSHA256(str string) bool {
-	return IsHash(str, "sha256")
-}
-
-// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")`.
-func IsTiger192(str string) bool {
-	return IsHash(str, "tiger192")
-}
-
-// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`.
-func IsTiger160(str string) bool {
-	return IsHash(str, "tiger160")
-}
-
-// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`.
-func IsRipeMD160(str string) bool {
-	return IsHash(str, "ripemd160")
-}
-
-// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`.
-func IsSHA1(str string) bool {
-	return IsHash(str, "sha1")
-}
-
-// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`.
-func IsTiger128(str string) bool {
-	return IsHash(str, "tiger128")
-}
-
-// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`.
-func IsRipeMD128(str string) bool {
-	return IsHash(str, "ripemd128")
-}
-
-// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`.
-func IsCRC32(str string) bool {
-	return IsHash(str, "crc32")
-}
-
-// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`.
-func IsCRC32b(str string) bool {
-	return IsHash(str, "crc32b")
-}
-
-// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")`.
-func IsMD5(str string) bool {
-	return IsHash(str, "md5")
-}
-
-// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")`.
-func IsMD4(str string) bool {
-	return IsHash(str, "md4")
-}
-
-// IsDialString validates the given string for usage with the various Dial() functions.
-func IsDialString(str string) bool {
-	if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
-		return true
-	}
-
-	return false
-}
-
-// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`.
-func IsIP(str string) bool {
-	return net.ParseIP(str) != nil
-}
-
-// IsPort checks if a string represents a valid port.
-func IsPort(str string) bool {
-	if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
-		return true
-	}
-	return false
-}
-
-// IsIPv4 checks if the string is an IP version 4.
-func IsIPv4(str string) bool {
-	ip := net.ParseIP(str)
-	return ip != nil && strings.Contains(str, ".")
-}
-
-// IsIPv6 checks if the string is an IP version 6.
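The dial-string rules above combine a host check with a port check; a sketch with hypothetical values:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsDialString("example.com:80")) // true: DNS name plus valid port
	fmt.Println(govalidator.IsDialString("example.com"))    // false: no port to split off
	fmt.Println(govalidator.IsPort("65536"))                // false: ports run 1 through 65535
}
```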
-func IsIPv6(str string) bool {
-	ip := net.ParseIP(str)
-	return ip != nil && strings.Contains(str, ":")
-}
-
-// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6).
-func IsCIDR(str string) bool {
-	_, _, err := net.ParseCIDR(str)
-	return err == nil
-}
-
-// IsMAC checks if a string is a valid MAC address.
-// Possible MAC formats:
-// 01:23:45:67:89:ab
-// 01:23:45:67:89:ab:cd:ef
-// 01-23-45-67-89-ab
-// 01-23-45-67-89-ab-cd-ef
-// 0123.4567.89ab
-// 0123.4567.89ab.cdef
-func IsMAC(str string) bool {
-	_, err := net.ParseMAC(str)
-	return err == nil
-}
-
-// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name.
-func IsHost(str string) bool {
-	return IsIP(str) || IsDNSName(str)
-}
-
-// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
-func IsMongoID(str string) bool {
-	return rxHexadecimal.MatchString(str) && (len(str) == 24)
-}
-
-// IsLatitude checks if a string is a valid latitude.
-func IsLatitude(str string) bool {
-	return rxLatitude.MatchString(str)
-}
-
-// IsLongitude checks if a string is a valid longitude.
-func IsLongitude(str string) bool {
-	return rxLongitude.MatchString(str)
-}
-
-// IsIMEI checks if a string is a valid IMEI.
-func IsIMEI(str string) bool {
-	return rxIMEI.MatchString(str)
-}
-
-// IsIMSI checks if a string is a valid IMSI.
-func IsIMSI(str string) bool {
-	if !rxIMSI.MatchString(str) {
-		return false
-	}
-
-	mcc, err := strconv.ParseInt(str[0:3], 10, 32)
-	if err != nil {
-		return false
-	}
-
-	switch mcc {
-	case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
-	case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
-	case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
-	case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
-	case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
-	case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
-	case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
-	case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
-	case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
-	case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
-	case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
-	case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
-	case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
-	case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
-	case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
-	case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
-	case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
-	case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
-	case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
-	case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
-	case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
-	case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
-	case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
-	case 738, 740, 742, 744, 746, 748, 750, 995:
-		return true
-	default:
-		return false
-	}
-	return true
-}
-
-// IsRsaPublicKey checks if a string is a valid RSA public key with the provided key length.
-func IsRsaPublicKey(str string, keylen int) bool {
-	bb := bytes.NewBufferString(str)
-	pemBytes, err := ioutil.ReadAll(bb)
-	if err != nil {
-		return false
-	}
-	block, _ := pem.Decode(pemBytes)
-	if block != nil && block.Type != "PUBLIC KEY" {
-		return false
-	}
-	var der []byte
-
-	if block != nil {
-		der = block.Bytes
-	} else {
-		der, err = base64.StdEncoding.DecodeString(str)
-		if err != nil {
-			return false
-		}
-	}
-
-	key, err := x509.ParsePKIXPublicKey(der)
-	if err != nil {
-		return false
-	}
- pubkey, ok := key.(*rsa.PublicKey) - if !ok { - return false - } - bitlen := len(pubkey.N.Bytes()) * 8 - return bitlen == int(keylen) -} - -// IsRegex checks if a give string is a valid regex with RE2 syntax or not -func IsRegex(str string) bool { - if _, err := regexp.Compile(str); err == nil { - return true - } - return false -} - -func toJSONName(tag string) string { - if tag == "" { - return "" - } - - // JSON name always comes first. If there's no options then split[0] is - // JSON name, if JSON name is not set, then split[0] is an empty string. - split := strings.SplitN(tag, ",", 2) - - name := split[0] - - // However it is possible that the field is skipped when - // (de-)serializing from/to JSON, in which case assume that there is no - // tag name to use - if name == "-" { - return "" - } - return name -} - -func prependPathToErrors(err error, path string) error { - switch err2 := err.(type) { - case Error: - err2.Path = append([]string{path}, err2.Path...) - return err2 - case Errors: - errors := err2.Errors() - for i, err3 := range errors { - errors[i] = prependPathToErrors(err3, path) - } - return err2 - } - return err -} - -// ValidateArray performs validation according to condition iterator that validates every element of the array -func ValidateArray(array []interface{}, iterator ConditionIterator) bool { - return Every(array, iterator) -} - -// ValidateMap use validation map for fields. -// result will be equal to `false` if there are any errors. -// s is the map containing the data to be validated. -// m is the validation map in the form: -// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} -func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - var errs Errors - var index int - val := reflect.ValueOf(s) - for key, value := range s { - presentResult := true - validator, ok := m[key] - if !ok { - presentResult = false - var err error - err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - valueField := reflect.ValueOf(value) - mapResult := true - typeResult := true - structResult := true - resultField := true - switch subValidator := validator.(type) { - case map[string]interface{}: - var err error - if v, ok := value.(map[string]interface{}); !ok { - mapResult = false - err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } else { - mapResult, err = ValidateMap(v, subValidator) - if err != nil { - mapResult = false - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - case string: - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - subValidator != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - resultField, err = typeCheck(valueField, reflect.StructField{ - Name: key, - PkgPath: "", - Type: val.Type(), - Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), - Offset: 0, - Index: []int{index}, - Anonymous: false, - }, val, nil) - if err != nil { - errs = append(errs, err) - } - case nil: - // already handlerd when checked before - default: - typeResult = 
false - err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - result = result && presentResult && typeResult && resultField && structResult && mapResult - index++ - } - // checks required keys - requiredResult := true - for key, value := range m { - if schema, ok := value.(string); ok { - tags := parseTagIntoMap(schema) - if required, ok := tags["required"]; ok { - if _, ok := s[key]; !ok { - requiredResult = false - if required.customErrorMessage != "" { - err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} - } else { - err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} - } - errs = append(errs, err) - } - } - } - } - - if len(errs) > 0 { - err = errs - } - return result && requiredResult, err -} - -// ValidateStruct use tags for fields. -// result will be equal to `false` if there are any errors. -// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) -func ValidateStruct(s interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - val := reflect.ValueOf(s) - if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { - val = val.Elem() - } - // we only accept structs - if val.Kind() != reflect.Struct { - return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) - } - var errs Errors - for i := 0; i < val.NumField(); i++ { - valueField := val.Field(i) - typeField := val.Type().Field(i) - if typeField.PkgPath != "" { - continue // Private field - } - structResult := true - if valueField.Kind() == reflect.Interface { - valueField = valueField.Elem() - } - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - typeField.Tag.Get(tagName) != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, typeField.Name) - errs = append(errs, err) - } - } - resultField, err2 := typeCheck(valueField, typeField, val, nil) - if err2 != nil { - - // Replace structure name with JSON name if there is a tag on the variable - jsonTag := toJSONName(typeField.Tag.Get("json")) - if jsonTag != "" { - switch jsonError := err2.(type) { - case Error: - jsonError.Name = jsonTag - err2 = jsonError - case Errors: - for i2, err3 := range jsonError { - switch customErr := err3.(type) { - case Error: - customErr.Name = jsonTag - jsonError[i2] = customErr - } - } - - err2 = jsonError - } - } - - errs = append(errs, err2) - } - result = result && resultField && structResult - } - if len(errs) > 0 { - err = errs - } - return result, err -} - -// ValidateStructAsync performs async validation of the struct and returns results through the channels -func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) { - res := make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) - - isValid, isFailed := ValidateStruct(s) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// ValidateMapAsync performs async validation of the map and returns results through the channels -func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) { - res := make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) 
- - isValid, isFailed := ValidateMap(s, m) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} -func parseTagIntoMap(tag string) tagOptionsMap { - optionsMap := make(tagOptionsMap) - options := strings.Split(tag, ",") - - for i, option := range options { - option = strings.TrimSpace(option) - - validationOptions := strings.Split(option, "~") - if !isValidTag(validationOptions[0]) { - continue - } - if len(validationOptions) == 2 { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} - } else { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} - } - } - return optionsMap -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -// IsSSN will validate the given string as a U.S. Social Security Number -func IsSSN(str string) bool { - if str == "" || len(str) != 11 { - return false - } - return rxSSN.MatchString(str) -} - -// IsSemver checks if string is valid semantic version -func IsSemver(str string) bool { - return rxSemver.MatchString(str) -} - -// IsType checks if interface is of some type -func IsType(v interface{}, params ...string) bool { - if len(params) == 1 { - typ := params[0] - return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) - } - return false -} - -// IsTime checks if string is valid according to given format -func IsTime(str string, format string) bool { - _, err := time.Parse(format, str) - return err == nil -} - -// IsUnixTime checks if string is valid unix timestamp value -func IsUnixTime(str string) bool { - if _, err := strconv.Atoi(str); err == nil { - return true - } - return false -} - -// IsRFC3339 checks if string is valid timestamp value according to RFC3339 -func IsRFC3339(str string) bool { - return IsTime(str, time.RFC3339) -} - -// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. -func IsRFC3339WithoutZone(str string) bool { - return IsTime(str, rfc3339WithoutZone) -} - -// IsISO4217 checks if string is valid ISO currency code -func IsISO4217(str string) bool { - for _, currency := range ISO4217List { - if str == currency { - return true - } - } - - return false -} - -// ByteLength checks string's length -func ByteLength(str string, params ...string) bool { - if len(params) == 2 { - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return len(str) >= int(min) && len(str) <= int(max) - } - - return false -} - -// RuneLength checks string's length -// Alias for StringLength -func RuneLength(str string, params ...string) bool { - return StringLength(str, params...) -} - -// IsRsaPub checks whether string is valid RSA key -// Alias for IsRsaPublicKey -func IsRsaPub(str string, params ...string) bool { - if len(params) == 1 { - len, _ := ToInt(params[0]) - return IsRsaPublicKey(str, int(len)) - } - - return false -} - -// StringMatches checks if a string matches a given pattern. 
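The tag grammar parsed above ('~' separates a validator from its custom error message) is easiest to see on a concrete, hypothetical struct:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type account struct {
	// parseTagIntoMap splits this tag into
	// {"required": "Name is blank", "length(2|64)": ""}.
	Name string `valid:"required~Name is blank,length(2|64)"`
}

func main() {
	_, err := govalidator.ValidateStruct(account{})
	fmt.Println(err) // the custom message "Name is blank" is reported for the empty field
}
```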
-func StringMatches(s string, params ...string) bool { - if len(params) == 1 { - pattern := params[0] - return Matches(s, pattern) - } - return false -} - -// StringLength checks string's length (including multi byte strings) -func StringLength(str string, params ...string) bool { - - if len(params) == 2 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return strLength >= int(min) && strLength <= int(max) - } - - return false -} - -// MinStringLength checks string's minimum length (including multi byte strings) -func MinStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - return strLength >= int(min) - } - - return false -} - -// MaxStringLength checks string's maximum length (including multi byte strings) -func MaxStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - max, _ := ToInt(params[0]) - return strLength <= int(max) - } - - return false -} - -// Range checks string's length -func Range(str string, params ...string) bool { - if len(params) == 2 { - value, _ := ToFloat(str) - min, _ := ToFloat(params[0]) - max, _ := ToFloat(params[1]) - return InRange(value, min, max) - } - - return false -} - -// IsInRaw checks if string is in list of allowed values -func IsInRaw(str string, params ...string) bool { - if len(params) == 1 { - rawParams := params[0] - - parsedParams := strings.Split(rawParams, "|") - - return IsIn(str, parsedParams...) - } - - return false -} - -// IsIn checks if string str is a member of the set of strings params -func IsIn(str string, params ...string) bool { - for _, param := range params { - if str == param { - return true - } - } - - return false -} - -func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { - if nilPtrAllowedByRequired { - k := v.Kind() - if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { - return true, nil - } - } - - if requiredOption, isRequired := options["required"]; isRequired { - if len(requiredOption.customErrorMessage) > 0 { - return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} - } - return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} - } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { - return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} - } - // not required and empty is valid - return true, nil -} - -func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { - if !v.IsValid() { - return false, nil - } - - tag := t.Tag.Get(tagName) - - // checks if the field should be ignored - switch tag { - case "": - if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { - if !fieldsRequiredByDefault { - return true, nil - } - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} - } - case "-": - return true, nil - } - - isRootType := false - if options == nil { - isRootType = true - options = parseTagIntoMap(tag) - } - - if isEmptyValue(v) { - // an empty value is not validated, checks only required - isValid, resultErr = checkRequired(v, t, options) - for key := range options { - delete(options, key) - } - return isValid, resultErr - } - - var 
customTypeErrors Errors - optionsOrder := options.orderedKeys() - for _, validatorName := range optionsOrder { - validatorStruct := options[validatorName] - if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { - delete(options, validatorName) - - if result := validatefunc(v.Interface(), o.Interface()); !result { - if len(validatorStruct.customErrorMessage) > 0 { - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) - continue - } - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) - } - } - } - - if len(customTypeErrors.Errors()) > 0 { - return false, customTypeErrors - } - - if isRootType { - // Ensure that we've checked the value by all specified validators before report that the value is valid - defer func() { - delete(options, "optional") - delete(options, "required") - - if isValid && resultErr == nil && len(options) != 0 { - optionsOrder := options.orderedKeys() - for _, validator := range optionsOrder { - isValid = false - resultErr = Error{t.Name, fmt.Errorf( - "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} - return - } - } - }() - } - - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' { - validator = validator[1:] - negate = true - } - - // checks for interface param validators - for key, value := range InterfaceParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := InterfaceParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - field := fmt.Sprint(v) - if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - } - } - - switch v.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64, - reflect.String: - // for each tag option checks the map of validator functions - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' 
{ - validator = validator[1:] - negate = true - } - - // checks for param validators - for key, value := range ParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := ParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - // type not yet supported, fail - return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} - } - } - - if validatefunc, ok := TagMap[validator]; ok { - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field); !result && !negate || result && negate { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - //Not Yet Supported Types (Fail here!) 
- err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) - return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} - } - } - } - return true, nil - case reflect.Map: - if v.Type().Key().Kind() != reflect.String { - return false, &UnsupportedTypeError{v.Type()} - } - var sv stringValues - sv = v.MapKeys() - sort.Sort(sv) - result := true - for i, k := range sv { - var resultItem bool - var err error - if v.MapIndex(k).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.MapIndex(k), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Slice, reflect.Array: - result := true - for i := 0; i < v.Len(); i++ { - var resultItem bool - var err error - if v.Index(i).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.Index(i), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.Index(i).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Interface: - // If the value is an interface then encode its element - if v.IsNil() { - return true, nil - } - return ValidateStruct(v.Interface()) - case reflect.Ptr: - // If the value is a pointer then checks its element - if v.IsNil() { - return true, nil - } - return typeCheck(v.Elem(), t, o, options) - case reflect.Struct: - return true, nil - default: - return false, &UnsupportedTypeError{v.Type()} - } -} - -func stripParams(validatorString string) string { - return paramsRegexp.ReplaceAllString(validatorString, "") -} - -// isEmptyValue checks whether value empty or not -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.String, reflect.Array: - return v.Len() == 0 - case reflect.Map, reflect.Slice: - return v.Len() == 0 || v.IsNil() - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) -} - -// ErrorByField returns error for specified field of the struct -// validated by ValidateStruct or empty string if there are no errors -// or this field doesn't exists or doesn't have any errors. -func ErrorByField(e error, field string) string { - if e == nil { - return "" - } - return ErrorsByField(e)[field] -} - -// ErrorsByField returns map of errors of the struct validated -// by ValidateStruct or empty map if there are no errors. 
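A usage sketch for ErrorByField above and ErrorsByField below; the struct and input are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type form struct {
	Email string `valid:"email"`
}

func main() {
	_, err := govalidator.ValidateStruct(form{Email: "not-an-email"})
	// ErrorByField pulls the message for one field out of the Errors value.
	fmt.Println(govalidator.ErrorByField(err, "Email"))
	// ErrorsByField returns the whole field-to-message map.
	fmt.Println(govalidator.ErrorsByField(err))
}
```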
-func ErrorsByField(e error) map[string]string { - m := make(map[string]string) - if e == nil { - return m - } - // prototype for ValidateStruct - - switch e := e.(type) { - case Error: - m[e.Name] = e.Err.Error() - case Errors: - for _, item := range e.Errors() { - n := ErrorsByField(item) - for k, v := range n { - m[k] = v - } - } - } - - return m -} - -// Error returns string equivalent for reflect.Type -func (e *UnsupportedTypeError) Error() string { - return "validator: unsupported type: " + e.Type.String() -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } -func (sv stringValues) get(i int) string { return sv[i].String() } - -func IsE164(str string) bool { - return rxE164.MatchString(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml deleted file mode 100644 index bc5f7b08..00000000 --- a/vendor/github.com/asaskevich/govalidator/wercker.yml +++ /dev/null @@ -1,15 +0,0 @@ -box: golang -build: - steps: - - setup-go-workspace - - - script: - name: go get - code: | - go version - go get -t ./... - - - script: - name: go test - code: | - go test -race -v ./... diff --git a/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go b/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go new file mode 100644 index 00000000..d7cd2b8c --- /dev/null +++ b/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go @@ -0,0 +1,96 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errhttp provides utility functions for translating errors to +// and from a HTTP context. +// +// The functions ToHTTP and ToNative can be used to map server-side and +// client-side errors to the correct types. 
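The new errhttp package, whose declarations follow below, gives a symmetric mapping between containerd error types and HTTP status codes; a minimal round-trip sketch:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/containerd/errdefs"
	"github.com/containerd/errdefs/pkg/errhttp"
)

func main() {
	// A client can turn a 404 back into the canonical containerd error...
	err := errhttp.ToNative(http.StatusNotFound)
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
	// ...and a server maps the canonical error to the same status code.
	fmt.Println(errhttp.ToHTTP(err)) // 404
}
```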
+package errhttp + +import ( + "errors" + "net/http" + + "github.com/containerd/errdefs" + "github.com/containerd/errdefs/pkg/internal/cause" +) + +// ToHTTP returns the best status code for the given error +func ToHTTP(err error) int { + switch { + case errdefs.IsNotFound(err): + return http.StatusNotFound + case errdefs.IsInvalidArgument(err): + return http.StatusBadRequest + case errdefs.IsConflict(err): + return http.StatusConflict + case errdefs.IsNotModified(err): + return http.StatusNotModified + case errdefs.IsFailedPrecondition(err): + return http.StatusPreconditionFailed + case errdefs.IsUnauthorized(err): + return http.StatusUnauthorized + case errdefs.IsPermissionDenied(err): + return http.StatusForbidden + case errdefs.IsResourceExhausted(err): + return http.StatusTooManyRequests + case errdefs.IsInternal(err): + return http.StatusInternalServerError + case errdefs.IsNotImplemented(err): + return http.StatusNotImplemented + case errdefs.IsUnavailable(err): + return http.StatusServiceUnavailable + case errdefs.IsUnknown(err): + var unexpected cause.ErrUnexpectedStatus + if errors.As(err, &unexpected) && unexpected.Status >= 200 && unexpected.Status < 600 { + return unexpected.Status + } + return http.StatusInternalServerError + default: + return http.StatusInternalServerError + } +} + +// ToNative returns the error best matching the HTTP status code +func ToNative(statusCode int) error { + switch statusCode { + case http.StatusNotFound: + return errdefs.ErrNotFound + case http.StatusBadRequest: + return errdefs.ErrInvalidArgument + case http.StatusConflict: + return errdefs.ErrConflict + case http.StatusPreconditionFailed: + return errdefs.ErrFailedPrecondition + case http.StatusUnauthorized: + return errdefs.ErrUnauthenticated + case http.StatusForbidden: + return errdefs.ErrPermissionDenied + case http.StatusNotModified: + return errdefs.ErrNotModified + case http.StatusTooManyRequests: + return errdefs.ErrResourceExhausted + case http.StatusInternalServerError: + return errdefs.ErrInternal + case http.StatusNotImplemented: + return errdefs.ErrNotImplemented + case http.StatusServiceUnavailable: + return errdefs.ErrUnavailable + default: + return cause.ErrUnexpectedStatus{Status: statusCode} + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 0ca6fd75..ba650b4d 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -26,12 +26,13 @@ import ( "archive/tar" "bytes" "compress/gzip" + "crypto/rand" "crypto/sha256" "encoding/json" "errors" "fmt" "io" - "math/rand" + "math/big" "os" "path/filepath" "reflect" @@ -45,10 +46,6 @@ import ( digest "github.com/opencontainers/go-digest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TestingController is Compression with some helper methods necessary for testing. 
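Returning to the estargz hunk above: the patch swaps math/rand for crypto/rand, which needs no seeding (hence the removed init). The adopted pattern, assuming the 62-rune alphanumeric table from testutil.go, looks like this:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

func main() {
	// rand.Int draws a uniform value in [0, 62) from crypto/rand.Reader.
	n, err := rand.Int(rand.Reader, big.NewInt(62))
	if err != nil {
		panic(err)
	}
	fmt.Println(int(n.Int64())) // a uniform index into a 62-rune alphabet
}
```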
type TestingController interface { Compression @@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } if sampleEntry == nil { t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") + return } if targetEntry == nil { t.Fatalf("rewrite target not found") + return } targetEntry.Offset = sampleEntry.Offset }, @@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX func randomContents(n int) string { b := make([]rune, n) for i := range b { - b[i] = runes[rand.Intn(len(runes))] + bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes)))) + if err != nil { + panic(err) + } + b[i] = runes[int(bi.Int64())] } return string(b) } diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md index 8d86600a..3098526a 100644 --- a/vendor/github.com/containerd/typeurl/v2/README.md +++ b/vendor/github.com/containerd/typeurl/v2/README.md @@ -18,3 +18,9 @@ As a containerd sub-project, you will find the: * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) information in our [`containerd/project`](https://github.com/containerd/project) repository. + +## Optional + +By default, support for gogoproto is available along side the standard Google +protobuf types. +You can choose to leave gogo support out by using the `!no_gogo` build tag. diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go index 78817b70..9bf78104 100644 --- a/vendor/github.com/containerd/typeurl/v2/types.go +++ b/vendor/github.com/containerd/typeurl/v2/types.go @@ -24,7 +24,6 @@ import ( "reflect" "sync" - gogoproto "github.com/gogo/protobuf/proto" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/known/anypb" @@ -33,8 +32,16 @@ import ( var ( mu sync.RWMutex registry = make(map[reflect.Type]string) + handlers []handler ) +type handler interface { + Marshaller(interface{}) func() ([]byte, error) + Unmarshaller(interface{}) func([]byte) error + TypeURL(interface{}) string + GetType(url string) (reflect.Type, bool) +} + // Definitions of common error types used throughout typeurl. 
// // These error types are used with errors.Wrap and errors.Wrapf to add context @@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) { switch t := v.(type) { case proto.Message: return string(t.ProtoReflect().Descriptor().FullName()), nil - case gogoproto.Message: - return gogoproto.MessageName(t), nil default: + for _, h := range handlers { + if u := h.TypeURL(v); u != "" { + return u, nil + } + } return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound) } } @@ -149,12 +159,19 @@ func MarshalAny(v interface{}) (Any, error) { marshal = func(v interface{}) ([]byte, error) { return proto.Marshal(t) } - case gogoproto.Message: - marshal = func(v interface{}) ([]byte, error) { - return gogoproto.Marshal(t) - } default: - marshal = json.Marshal + for _, h := range handlers { + if m := h.Marshaller(v); m != nil { + marshal = func(v interface{}) ([]byte, error) { + return m() + } + break + } + } + + if marshal == nil { + marshal = json.Marshal + } } url, err := TypeURL(v) @@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) { } func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) + t, isProto, err := getTypeByUrl(typeURL) if err != nil { return nil, err } if v == nil { - v = reflect.New(t.t).Interface() + v = reflect.New(t).Interface() } else { // Validate interface type provided by client vURL, err := TypeURL(v) @@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) } } - if t.isProto { - switch t := v.(type) { - case proto.Message: - err = proto.Unmarshal(value, t) - case gogoproto.Message: - err = gogoproto.Unmarshal(value, t) + if isProto { + pm, ok := v.(proto.Message) + if ok { + return v, proto.Unmarshal(value, pm) } - } else { - err = json.Unmarshal(value, v) - } - return v, err -} + for _, h := range handlers { + if unmarshal := h.Unmarshaller(v); unmarshal != nil { + return v, unmarshal(value) + } + } + } -type urlType struct { - t reflect.Type - isProto bool + // fallback to json unmarshaller + return v, json.Unmarshal(value, v) } -func getTypeByUrl(url string) (urlType, error) { +func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) { mu.RLock() for t, u := range registry { if u == url { mu.RUnlock() - return urlType{ - t: t, - }, nil + return t, false, nil } } mu.RUnlock() - // fallback to proto registry - t := gogoproto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) if err != nil { - return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound) + if errors.Is(err, protoregistry.NotFound) { + for _, h := range handlers { + if t, isProto := h.GetType(url); t != nil { + return t, isProto, nil + } + } + } + return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound) } empty := mt.New().Interface() - return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil + return reflect.TypeOf(empty).Elem(), true, nil } func tryDereference(v interface{}) reflect.Type { diff --git a/vendor/github.com/containerd/typeurl/v2/types_gogo.go b/vendor/github.com/containerd/typeurl/v2/types_gogo.go new file mode 100644 index 00000000..adb892ec --- /dev/null +++ b/vendor/github.com/containerd/typeurl/v2/types_gogo.go @@ -0,0 +1,68 @@ +//go:build !no_gogo + +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package typeurl + +import ( + "reflect" + + gogoproto "github.com/gogo/protobuf/proto" +) + +func init() { + handlers = append(handlers, gogoHandler{}) +} + +type gogoHandler struct{} + +func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) { + pm, ok := v.(gogoproto.Message) + if !ok { + return nil + } + return func() ([]byte, error) { + return gogoproto.Marshal(pm) + } +} + +func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error { + pm, ok := v.(gogoproto.Message) + if !ok { + return nil + } + + return func(dt []byte) error { + return gogoproto.Unmarshal(dt, pm) + } +} + +func (gogoHandler) TypeURL(v interface{}) string { + pm, ok := v.(gogoproto.Message) + if !ok { + return "" + } + return gogoproto.MessageName(pm) +} + +func (gogoHandler) GetType(url string) (reflect.Type, bool) { + t := gogoproto.MessageType(url) + if t == nil { + return nil, false + } + return t.Elem(), true +} diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 5b26abb0..44675f37 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -148,6 +148,13 @@ type Options struct { // so that storage.ResolveReference returns exactly the created image. // WARNING: It is unspecified whether the reference also contains a reference.Named element. ReportResolvedReference *types.ImageReference + + // DestinationTimestamp, if set, will force timestamps of content created in the destination to this value. + // Most transports don't support this. + // + // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry + // (but not a timestamp of the created archive file). 
+ DestinationTimestamp *time.Time } // OptionCompressionVariant allows to supply information about @@ -354,6 +361,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{ UnparsedToplevel: c.unparsedToplevel, ReportResolvedReference: options.ReportResolvedReference, + Timestamp: options.DestinationTimestamp, }); err != nil { return nil, fmt.Errorf("committing the finished image: %w", err) } diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index 009a067c..b0c107e8 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -83,7 +83,7 @@ func platformCompressionMap(list internalManifest.List, instanceDigests []digest platformSet = set.New[string]() res[platform] = platformSet } - platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames) + platformSet.AddSeq(slices.Values(instanceDetails.ReadOnly.CompressionAlgorithmNames)) } return res, nil } diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index e008c7e8..19d410a6 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "iter" "maps" "reflect" "slices" @@ -109,7 +110,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar } } - if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil { return copySingleImageResult{}, err } @@ -316,32 +317,37 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar return res, nil } -// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary. -func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { +// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary. +func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error { + ociConfig, configErr := src.OCIConfig(ctx) + // Do not fail on configErr here, this might be an artifact + // and maybe nothing needs this to be a container image and to process the config. + if dest.MustMatchRuntimeOS() { - c, err := src.OCIConfig(ctx) - if err != nil { - return fmt.Errorf("parsing image configuration: %w", err) + if configErr != nil { + return fmt.Errorf("parsing image configuration: %w", configErr) } wantedPlatforms := platform.WantedPlatforms(sys) - options := newOrderedSet() - match := false - for _, wantedPlatform := range wantedPlatforms { + if !slices.ContainsFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { // For a transitional period, this might trigger warnings because the Variant // field was added to OCI config only recently. If this turns out to be too noisy, // revert this check to only look for (OS, Architecture). 
- if platform.MatchesPlatform(c.Platform, wantedPlatform) { - match = true - break + return platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) + }) { + options := newOrderedSet() + for _, p := range wantedPlatforms { + options.append(fmt.Sprintf("%s+%s+%q", p.OS, p.Architecture, p.Variant)) } - options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant)) - } - if !match { logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", - c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", ")) + ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", ")) } } + + if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil { + return err + } + return nil } @@ -412,7 +418,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, } } - algos, err := algorithmsByNames(compressionAlgos.Values()) + algos, err := algorithmsByNames(compressionAlgos.All()) if err != nil { return nil, err } @@ -547,7 +553,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { ic.manifestUpdates.LayerInfos = destInfos } - algos, err := algorithmsByNames(compressionAlgos.Values()) + algos, err := algorithmsByNames(compressionAlgos.All()) if err != nil { return nil, err } @@ -806,6 +812,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{ Cache: ic.c.blobInfoCache, + EmptyLayer: emptyLayer, LayerIndex: layerIndex, }) if err == nil { @@ -983,10 +990,10 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF return digest.Canonical.FromReader(stream) } -// algorithmsByNames returns slice of Algorithms from slice of Algorithm Names -func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) { +// algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names +func algorithmsByNames(names iter.Seq[string]) ([]compressiontypes.Algorithm, error) { result := []compressiontypes.Algorithm{} - for _, name := range names { + for name := range names { algo, err := compression.AlgorithmByName(name) if err != nil { return nil, err diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 000c8987..6e88aa01 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don' type dirImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.AlwaysSupportsSignatures diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go index 39e92cac..2c3926cc 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -101,6 +101,9 @@ func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, // NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index. 
func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) { + if sourceIndex < 0 { + return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex) + } return newReference(path, nil, sourceIndex, nil, nil) } diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go index e69e21ef..3c612f26 100644 --- a/vendor/github.com/containers/image/v5/docker/body_reader.go +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -35,9 +35,9 @@ type bodyReader struct { body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. lastRetryOffset int64 // -1 if N/A - lastRetryTime time.Time // time.Time{} if N/A + lastRetryTime time.Time // IsZero() if N/A offset int64 // Current offset within the blob - lastSuccessTime time.Time // time.Time{} if N/A + lastSuccessTime time.Time // IsZero() if N/A } // newBodyReader creates a bodyReader for request path in c. @@ -207,9 +207,9 @@ func (br *bodyReader) Read(p []byte) (int, error) { } // millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value. -// If tm is time.Time{}, it returns math.NaN() +// If tm.IsZero(), it returns math.NaN() func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 { - if tm == (time.Time{}) { + if tm.IsZero() { return math.NaN() } return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0 @@ -229,7 +229,7 @@ func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL stri logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress) return nil } - if br.lastRetryTime == (time.Time{}) { + if br.lastRetryTime.IsZero() { logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr) return nil } diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 64ccf6ae..6ade2384 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -3,6 +3,7 @@ package daemon import ( "net/http" "path/filepath" + "time" "github.com/containers/image/v5/types" dockerclient "github.com/docker/docker/client" @@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { } switch serverURL.Scheme { case "unix": // Nothing + case "npipe": // Nothing case "http": hc := httpConfig() opts = append(opts, dockerclient.WithHTTPClient(hc)) @@ -82,6 +84,11 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsc, + // In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility. + // These idle connection limits really only apply to long-running clients, which is not our case here; + // we include the same values purely for symmetry. + MaxIdleConns: 6, + IdleConnTimeout: 30 * time.Second, }, CheckRedirect: dockerclient.CheckRedirect, }, nil @@ -92,6 +99,11 @@ func httpConfig() *http.Client { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, TLSClientConfig: nil, + // In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility. 
+ // These idle connection limits really only apply to long-running clients, which is not our case here; + // we include the same values purely for symmetry. + MaxIdleConns: 6, + IdleConnTimeout: 30 * time.Second, }, CheckRedirect: dockerclient.CheckRedirect, } diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index 4a59a6a6..c6537087 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -92,7 +92,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe // imageLoad accepts tar stream on reader and sends it to c func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error { - resp, err := c.ImageLoad(ctx, reader, true) + resp, err := c.ImageLoad(ctx, reader, client.ImageLoadWithQuiet(true)) if err != nil { return fmt.Errorf("starting a load operation in docker engine: %w", err) } diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index 9eedff74..9fc0a92f 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -87,10 +87,13 @@ func ParseReference(refString string) (types.ImageReference, error) { // NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { - if id != "" && ref != nil { + switch { + case id != "" && ref != nil: return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") - } - if ref != nil { + case id == "" && ref == nil: + return nil, errors.New("docker-daemon: reference must have at least one of an image ID and a reference string") + + case ref != nil: if reference.IsNameOnly(ref) { return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) } diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 0a006457..06a9593d 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -24,21 +24,31 @@ import ( "slices" "github.com/docker/distribution/registry/api/errcode" - dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty // errcode.Errors slice. var errNoErrorsInBody = errors.New("no error details found in HTTP response body") -// unexpectedHTTPStatusError is returned when an unexpected HTTP status is +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. -type unexpectedHTTPStatusError struct { - Status string +type UnexpectedHTTPStatusError struct { + // StatusCode code as returned from the server, so callers can + // match the exact code to make certain decisions if needed. + StatusCode int + // status text as displayed in the error message, not exposed as callers should match the number. 
+ status string +} + +func (e UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.status) } -func (e *unexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +func newUnexpectedHTTPStatusError(resp *http.Response) UnexpectedHTTPStatusError { + return UnexpectedHTTPStatusError{ + StatusCode: resp.StatusCode, + status: resp.Status, + } } // unexpectedHTTPResponseError is returned when an expected HTTP status code @@ -114,10 +124,11 @@ func mergeErrors(err1, err2 error) error { // UnexpectedHTTPStatusError returned for response code outside of expected // range. func handleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { + switch { + case resp.StatusCode == http.StatusUnauthorized: // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range dockerChallenge.ResponseChallenges(resp) { + for c := range iterateAuthHeader(resp.Header) { if c.Scheme == "bearer" { var err errcode.Error // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 @@ -138,11 +149,13 @@ func handleErrorResponse(resp *http.Response) error { return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) } } + fallthrough + case resp.StatusCode >= 400 && resp.StatusCode < 500: err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } - return &unexpectedHTTPStatusError{Status: resp.Status} + return newUnexpectedHTTPStatusError(resp) } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index bd024422..851d3e08 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -11,6 +11,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "strconv" "strings" "sync" @@ -475,12 +476,11 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { } // Checks if the auth headers in the response contain an indication of a failed -// authorizdation because of an "insufficient_scope" error. If that's the case, +// authorization because of an "insufficient_scope" error. If that's the case, // returns the required scope to be used for fetching a new token. 
func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) {
 if res.StatusCode == http.StatusUnauthorized {
- challenges := parseAuthHeader(res.Header)
- for _, challenge := range challenges {
+ for challenge := range iterateAuthHeader(res.Header) {
 if challenge.Scheme == "bearer" {
 if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
 if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
@@ -907,6 +907,10 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 }
 tr := tlsclientconfig.NewTransport()
 tr.TLSClientConfig = c.tlsClientConfig
+ // If DockerProxyURL is set explicitly, use it instead of the system proxy.
+ if c.sys != nil && c.sys.DockerProxyURL != nil {
+ tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL)
+ }
 c.client = &http.Client{Transport: tr}
 
 ping := func(scheme string) error {
@@ -924,7 +928,7 @@
 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
 return registryHTTPResponseToError(resp)
 }
- c.challenges = parseAuthHeader(resp.Header)
+ c.challenges = slices.Collect(iterateAuthHeader(resp.Header))
 c.scheme = scheme
 c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
 return nil
@@ -992,13 +996,18 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
 continue
 }
 if resp.StatusCode != http.StatusOK {
- err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+ err := fmt.Errorf("error fetching external blob from %q: %w", u, newUnexpectedHTTPStatusError(resp))
 remoteErrors = append(remoteErrors, err)
 logrus.Debug(err)
 resp.Body.Close()
 continue
 }
- return resp.Body, getBlobSize(resp), nil
+
+ size, err := getBlobSize(resp)
+ if err != nil {
+ size = -1
+ }
+ return resp.Body, size, nil
 }
 if remoteErrors == nil {
 return nil, 0, nil // fallback to non-external blob
@@ -1006,12 +1015,20 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
 return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors))
 }
 
-func getBlobSize(resp *http.Response) int64 {
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- size = -1
+func getBlobSize(resp *http.Response) (int64, error) {
+ hdrs := resp.Header.Values("Content-Length")
+ if len(hdrs) == 0 {
+ return -1, errors.New(`Missing "Content-Length" header in response`)
+ }
+ hdr := hdrs[0] // Equivalent to resp.Header.Get(…)
+ size, err := strconv.ParseInt(hdr, 10, 64)
+ if err != nil { // Go’s response reader should already reject such values.
+ return -1, err
+ }
+ if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go’s response reader should already reject such values.
+ return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr)
 }
- return size
+ return size, nil
 }
 
 // getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
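The reworked getBlobSize above reports a missing or malformed Content-Length as an explicit error, and the call sites in this patch map that error back to the legacy -1 "size unknown" sentinel rather than failing the fetch. A minimal standalone sketch of that caller-side convention, using only the standard library (the helper name sizeOrUnknown is invented for illustration and is not part of the vendored code):

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// sizeOrUnknown mirrors the fallback used by the patched callers: a missing,
// unparsable, or negative Content-Length degrades to -1 ("size unknown")
// instead of aborting the blob fetch; digest verification still protects
// the content's integrity.
func sizeOrUnknown(resp *http.Response) int64 {
	vals := resp.Header.Values("Content-Length")
	if len(vals) == 0 {
		return -1
	}
	size, err := strconv.ParseInt(vals[0], 10, 64)
	if err != nil || size < 0 {
		return -1
	}
	return size
}

func main() {
	resp := &http.Response{Header: http.Header{"Content-Length": []string{"1024"}}}
	fmt.Println(sizeOrUnknown(resp)) // prints 1024
	resp.Header.Set("Content-Length", "not-a-number")
	fmt.Println(sizeOrUnknown(resp)) // prints -1
}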
@@ -1042,7 +1059,10 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty return nil, 0, fmt.Errorf("fetching blob: %w", err) } cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - blobSize := getBlobSize(res) + blobSize, err := getBlobSize(res) + if err != nil { + blobSize = -1 + } reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) if err != nil { @@ -1056,6 +1076,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) { // Note that this copies all kinds of attachments: attestations, and whatever else is there, // not just signatures. We leave the signature consumers to decide based on the MIME type. + + if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check + return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err) + } + digestAlgorithm := desc.Digest.Algorithm() + if !digestAlgorithm.Available() { + return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String()) + } + reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache) if err != nil { return nil, err @@ -1065,6 +1094,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR if err != nil { return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err) } + actualDigest := digestAlgorithm.FromBytes(payload) + if actualDigest != desc.Digest { + return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String()) + } return payload, nil } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index f5e2bb61..76e48a38 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -41,6 +41,7 @@ import ( type dockerImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize ref dockerReference @@ -242,8 +243,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference. defer res.Body.Close() switch res.StatusCode { case http.StatusOK: + size, err := getBlobSize(res) + if err != nil { + return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err) + } logrus.Debugf("... already exists") - return true, getBlobSize(res), nil + return true, size, nil case http.StatusUnauthorized: logrus.Debugf("... 
not authorized") return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) @@ -610,11 +615,11 @@ func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, si } switch { case d.c.supportsSignatures: - if err := d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest); err != nil { + if err := d.putSignaturesToAPIExtension(ctx, otherSignatures, *instanceDigest); err != nil { return err } case d.c.signatureBase != nil: - if err := d.putSignaturesToLookaside(signatures, *instanceDigest); err != nil { + if err := d.putSignaturesToLookaside(otherSignatures, *instanceDigest); err != nil { return err } default: diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 6e44ce09..4eb9cdfb 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } return } + if parts >= len(chunks) { + errs <- errors.New("too many parts returned by the server") + break + } s := signalCloseReader{ closed: make(chan struct{}), stream: p, @@ -565,7 +569,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL logrus.Debugf("... got status 404, as expected = end of signatures") return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, fmt.Errorf("reading signature from %s: %w", sigURL.Redacted(), newUnexpectedHTTPStatusError(res)) } contentType := res.Header.Get("Content-Type") diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index e749b501..1ed40b87 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -40,10 +40,10 @@ func httpResponseToError(res *http.Response, context string) error { err := registryHTTPResponseToError(res) return ErrUnauthorizedForCredentials{Err: err} default: - if context != "" { - context += ": " + if context == "" { + return newUnexpectedHTTPStatusError(res) } - return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) + return fmt.Errorf("%s: %w", context, newUnexpectedHTTPStatusError(res)) } } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index f1423468..8f5ba7e3 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -24,6 +24,7 @@ import ( type Destination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.NoSignaturesInitialize diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index 883c0611..320a1780 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -242,9 +242,7 @@ func (w *Writer) 
ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
 }
 
 knownRepoTags := set.New[string]()
- for _, repoTag := range item.RepoTags {
- knownRepoTags.Add(repoTag)
- }
+ knownRepoTags.AddSeq(slices.Values(item.RepoTags))
 for _, tag := range repoTags {
 // For github.com/docker/docker consumers, this works just as well as
 //   refString := ref.String()
diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go
index 862e8803..d9993630 100644
--- a/vendor/github.com/containers/image/v5/docker/paths_common.go
+++ b/vendor/github.com/containers/image/v5/docker/paths_common.go
@@ -1,5 +1,4 @@
 //go:build !freebsd
-// +build !freebsd
 
 package docker
diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
index 2bf27ac0..8f0f2eee 100644
--- a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
+++ b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd
-// +build freebsd
 
 package docker
diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go
index 3619c3ba..89d48cc4 100644
--- a/vendor/github.com/containers/image/v5/docker/registries_d.go
+++ b/vendor/github.com/containers/image/v5/docker/registries_d.go
@@ -3,6 +3,7 @@ package docker
 import (
 "errors"
 "fmt"
+ "io/fs"
 "net/url"
 "os"
 "path"
@@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 configPath := filepath.Join(dirPath, configName)
 configBytes, err := os.ReadFile(configPath)
 if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ // file must have been removed between the directory listing
+ // and the open call; ignore that, as it is an expected race
+ continue
+ }
 return nil, err
 }
diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
index 6bcb835b..f5fed07b 100644
--- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
+++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
@@ -4,6 +4,7 @@ package docker
 
 import (
 "fmt"
+ "iter"
 "net/http"
 "strings"
 )
@@ -60,15 +61,17 @@ func init() {
 }
 }
 
-func parseAuthHeader(header http.Header) []challenge {
- challenges := []challenge{}
- for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
- v, p := parseValueAndParams(h)
- if v != "" {
- challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+func iterateAuthHeader(header http.Header) iter.Seq[challenge] {
+ return func(yield func(challenge) bool) {
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ if v != "" {
+ if !yield(challenge{Scheme: v, Parameters: p}) {
+ return
+ }
+ }
 }
 }
- return challenges
 }
 
 // parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
index 0f026501..1cffe431 100644
--- a/vendor/github.com/containers/image/v5/internal/image/unparsed.go
+++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go
@@ -30,6 +30,9 @@ type UnparsedImage struct {
 
 // UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // +// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image +// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. +// // The UnparsedImage must not be used after the underlying ImageSource is Close()d. // // This is publicly visible as c/image/image.UnparsedInstance. @@ -48,6 +51,9 @@ func (i *UnparsedImage) Reference() types.ImageReference { } // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +// +// Users of UnparsedImage are promised that this validates the image +// against either i.instanceDigest if set, or against a digest included in i.src.Reference. func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { if i.cachedManifest == nil { m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go new file mode 100644 index 00000000..c4536e93 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go @@ -0,0 +1,16 @@ +package stubs + +import ( + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing. +type IgnoresOriginalOCIConfig struct{} + +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 0fe902e3..b2462a3b 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -14,6 +14,7 @@ import ( // wrapped provides the private.ImageDestination operations // for a destination that only implements types.ImageDestination type wrapped struct { + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize types.ImageDestination diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 07922cec..4c1589ef 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. 
-func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { +func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { editInstances := []ListEdit{} for i, instance := range updates { editInstances = append(editInstances, ListEdit{ - UpdateOldDigest: index.Manifests[i].Digest, + UpdateOldDigest: list.Manifests[i].Digest, UpdateDigest: instance.Digest, UpdateSize: instance.Size, UpdateMediaType: instance.MediaType, ListOperation: ListOpUpdate}) } - return index.editInstances(editInstances) + return list.editInstances(editInstances) } -func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { +func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error { addedEntries := []Schema2ManifestDescriptor{} for i, editInstance := range editInstances { switch editInstance.ListOperation { @@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if err := editInstance.UpdateDigest.Validate(); err != nil { return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) } - targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool { + targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool { return m.Digest == editInstance.UpdateOldDigest }) if targetIndex == -1 { return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) } - index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + list.Manifests[targetIndex].Digest = editInstance.UpdateDigest if editInstance.UpdateSize < 0 { return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) } - index.Manifests[targetIndex].Size = editInstance.UpdateSize + list.Manifests[targetIndex].Size = editInstance.UpdateSize if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType) } - index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType case ListOpAdd: if editInstance.AddPlatform == nil { // Should we create a struct with empty fields instead? @@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if len(addedEntries) != 0 { // slices.Clone() here to ensure a private backing array; // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. - index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) + list.Manifests = append(slices.Clone(list.Manifests), addedEntries...) 
} return nil } -func (index *Schema2List) EditInstances(editInstances []ListEdit) error { - return index.editInstances(editInstances) +func (list *Schema2List) EditInstances(editInstances []ListEdit) error { + return list.editInstances(editInstances) } func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { @@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List { return &Schema2List{*public} } -func (index *Schema2List) CloneInternal() List { - return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic)) +func (list *Schema2List) CloneInternal() List { + return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic)) } -func (index *Schema2List) Clone() ListPublic { - return index.CloneInternal() +func (list *Schema2List) Clone() ListPublic { + return list.CloneInternal() } // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 6a0f88d3..719deccb 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -213,12 +213,12 @@ type instanceCandidate struct { digest digest.Digest // Instance digest } -func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool { +func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip types.OptionalBool) bool { switch { case ic.platformIndex != other.platformIndex: return ic.platformIndex < other.platformIndex case ic.isZstd != other.isZstd: - if !preferGzip { + if preferGzip != types.OptionalBoolTrue { return ic.isZstd } else { return !ic.isZstd @@ -232,10 +232,6 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip // chooseInstance is a private equivalent to ChooseInstanceByCompression, // shared by ChooseInstance and ChooseInstanceByCompression. 
func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - didPreferGzip := false - if preferGzip == types.OptionalBoolTrue { - didPreferGzip = true - } wantedPlatforms := platform.WantedPlatforms(ctx) var bestMatch *instanceCandidate bestMatch = nil @@ -251,7 +247,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi } candidate.platformIndex = platformIndex } - if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) { + if bestMatch == nil || candidate.isPreferredOver(bestMatch, preferGzip) { bestMatch = &candidate } } diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 4247a8db..ae0cbdf2 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -3,6 +3,7 @@ package private import ( "context" "io" + "time" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/blobinfocache" @@ -10,6 +11,7 @@ import ( compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageSourceInternalOnly is the part of private.ImageSource that is not @@ -41,6 +43,12 @@ type ImageDestinationInternalOnly interface { // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures // on unsupported formats. + // NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, + // or an error obtaining that value (e.g. if the image is an artifact and not a container image). + // The destination can use it in its TryReusingBlob/PutBlob implementations + // (otherwise it only obtains the final config after all layers are written). + NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -110,6 +118,7 @@ type PutBlobOptions struct { // PutBlobPartialOptions are used in PutBlobPartial. type PutBlobPartialOptions struct { Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs) } @@ -163,6 +172,12 @@ type CommitOptions struct { // What “resolved” means is transport-specific. // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value. ReportResolvedReference *types.ImageReference + // Timestamp, if set, will force timestamps of content created in the destination to this value. + // Most transports don't support this. + // + // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry + // (but not a timestamp of the created archive file). + Timestamp *time.Time } // ImageSourceChunk is a portion of a blob. 
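The CommitOptions.Timestamp field above is the transport-facing half of the copy.Options.DestinationTimestamp plumbing added earlier in this patch: the generic copy code forwards the option into CommitWithOptions, and oci-archive: destinations use it to pin tar entry times. A hedged end-to-end sketch of a caller, assuming the usual c/image entry points (the reference strings are placeholders, and the accept-anything policy is for illustration only; real callers should load the system policy instead):

package main

import (
	"context"
	"log"
	"time"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	ctx := context.Background()

	// Illustration-only policy that accepts any image.
	policyCtx, err := signature.NewPolicyContext(&signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer policyCtx.Destroy()

	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}
	destRef, err := alltransports.ParseImageName("oci-archive:/tmp/alpine.tar")
	if err != nil {
		log.Fatal(err)
	}

	// Pin the create/mod/access times of every tar entry in the destination
	// archive for reproducible output; per the field documentation above,
	// the archive file's own mtime is not affected.
	epoch := time.Unix(0, 0).UTC()
	if _, err := copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
		DestinationTimestamp: &epoch,
	}); err != nil {
		log.Fatal(err)
	}
}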
diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go index acf30343..7716b12d 100644 --- a/vendor/github.com/containers/image/v5/internal/set/set.go +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -1,6 +1,9 @@ package set -import "golang.org/x/exp/maps" +import ( + "iter" + "maps" +) // FIXME: // - Docstrings @@ -28,8 +31,8 @@ func (s *Set[E]) Add(v E) { s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. } -func (s *Set[E]) AddSlice(slice []E) { - for _, v := range slice { +func (s *Set[E]) AddSeq(seq iter.Seq[E]) { + for v := range seq { s.Add(v) } } @@ -47,6 +50,6 @@ func (s *Set[E]) Empty() bool { return len(s.m) == 0 } -func (s *Set[E]) Values() []E { +func (s *Set[E]) All() iter.Seq[E] { return maps.Keys(s.m) } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index b74a1e24..f4b1fc03 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -133,12 +133,12 @@ func (m *Schema1) ConfigInfo() types.BlobInfo { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *Schema1) LayerInfos() []LayerInfo { - layers := make([]LayerInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = LayerInfo{ + layers := make([]LayerInfo, 0, len(m.FSLayers)) + for i, layer := range slices.Backward(m.FSLayers) { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers = append(layers, LayerInfo{ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, - } + }) } return layers } @@ -284,7 +284,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { } // Build the history. convertedHistory := []Schema2History{} - for _, compat := range m.ExtractedV1Compatibility { + for _, compat := range slices.Backward(m.ExtractedV1Compatibility) { hitem := Schema2History{ Created: compat.Created, CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), @@ -292,7 +292,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { Comment: compat.Comment, EmptyLayer: compat.ThrowAway, } - convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + convertedHistory = append(convertedHistory, hitem) } // Build the rootfs information. We need the decompressed sums that we've been // calculating to fill in the DiffIDs. 
It's expected (but not enforced by us) diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 0faa866b..a18425d0 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -166,10 +166,11 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // getEncryptedMediaType will return the mediatype to its encrypted counterpart and return // an error if the mediatype does not support encryption func getEncryptedMediaType(mediatype string) (string, error) { - if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") { + parts := strings.Split(mediatype, "+") + if slices.Contains(parts[1:], "encrypted") { return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype) } - unsuffixedMediatype := strings.Split(mediatype, "+")[0] + unsuffixedMediatype := parts[0] switch unsuffixedMediatype { case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 1d1e26c8..5aa40161 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "time" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" @@ -14,6 +15,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -103,6 +105,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool { return d.unpackedDest.SupportsPutBlobPartial() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. 
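The docker_schema1.go hunks earlier in this patch replace hand-rolled reverse loops (including a quadratic prepend-into-a-fresh-slice pattern) with Go 1.23's slices.Backward iterator, appending results while walking the slice from the end. A short self-contained illustration of the idiom, using toy data rather than real manifest entries:

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Schema1 FSLayers are stored most-recent-first; iterating with
	// slices.Backward visits them oldest-first while still yielding the
	// original index, so entries can be appended in order instead of
	// prepended one at a time.
	fsLayers := []string{"layer3", "layer2", "layer1"}

	ordered := make([]string, 0, len(fsLayers))
	for i, layer := range slices.Backward(fsLayers) {
		ordered = append(ordered, fmt.Sprintf("%d:%s", i, layer))
	}
	fmt.Println(ordered) // [2:layer1 1:layer2 0:layer3]
}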
@@ -163,16 +173,19 @@ func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, opti src := d.tempDirRef.tempDirectory // path to save tarred up file dst := d.ref.resolvedFile - return tarDirectory(src, dst) + return tarDirectory(src, dst, options.Timestamp) } // tar converts the directory at src and saves it to dst -func tarDirectory(src, dst string) error { +// if contentModTimes is non-nil, tar header entries times are set to this +func tarDirectory(src, dst string, contentModTimes *time.Time) (retErr error) { // input is a stream of bytes from the archive of the directory at path input, err := archive.TarWithOptions(src, &archive.TarOptions{ Compression: archive.Uncompressed, // Don’t include the data about the user account this code is running under. ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, + // override tar header timestamps + Timestamp: contentModTimes, }) if err != nil { return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err) @@ -184,7 +197,14 @@ func tarDirectory(src, dst string) error { if err != nil { return fmt.Errorf("creating tar file %q: %w", dst, err) } - defer outFile.Close() + + // since we are writing to this file, make sure we handle errors + defer func() { + closeErr := outFile.Close() + if retErr == nil { + retErr = closeErr + } + }() // copies the contents of the directory to the tar file // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index d5fee363..1cc3fa3b 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -52,13 +52,13 @@ func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) erro return internal.ValidateScope(scope) } -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI archive ImageReference. func ParseReference(reference string) (types.ImageReference, error) { file, image := internal.SplitPathAndImage(reference) return NewReference(file, image) } -// NewReference returns an OCI reference for a file and a image. +// NewReference returns an OCI archive reference for a file and an optional image name annotation (if not ""). func NewReference(file, image string) (types.ImageReference, error) { resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) if err != nil { diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go index 53827b11..c4eaed0e 100644 --- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go +++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go @@ -6,6 +6,7 @@ import ( "path/filepath" "regexp" "runtime" + "strconv" "strings" ) @@ -98,7 +99,7 @@ func ValidateScope(scope string) error { } func validateScopeWindows(scope string) error { - matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope) if !matched { return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope) } @@ -119,3 +120,31 @@ func validateScopeNonWindows(scope string) error { return nil } + +// parseOCIReferenceName parses the image from the oci reference. 
+func parseOCIReferenceName(image string) (img string, index int, err error) { + index = -1 + if strings.HasPrefix(image, "@") { + idx, err := strconv.Atoi(image[1:]) + if err != nil { + return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err) + } + if idx < 0 { + return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx) + } + index = idx + } else { + img = image + } + return img, index, nil +} + +// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists +func ParseReferenceIntoElements(reference string) (string, string, int, error) { + dir, image := SplitPathAndImage(reference) + image, index, err := parseOCIReferenceName(image) + if err != nil { + return "", "", -1, err + } + return dir, image, index, nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go index 08366a7e..7978229d 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -123,7 +123,7 @@ func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[diges // // So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error { - for _, digest := range blobsToDelete.Values() { + for digest := range blobsToDelete.All() { blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above if err != nil { return err @@ -159,7 +159,7 @@ func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error { return saveJSON(ref.indexPath(), index) } -func saveJSON(path string, content any) error { +func saveJSON(path string, content any) (retErr error) { // If the file already exists, get its mode to preserve it var mode fs.FileMode existingfi, err := os.Stat(path) @@ -177,7 +177,13 @@ func saveJSON(path string, content any) error { if err != nil { return err } - defer file.Close() + // since we are writing to this file, make sure we handle errors + defer func() { + closeErr := file.Close() + if retErr == nil { + retErr = closeErr + } + }() return json.NewEncoder(file).Encode(content) } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 85374cec..492ede59 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -27,6 +27,7 @@ import ( type ociImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.NoSignaturesInitialize @@ -37,6 +38,9 @@ type ociImageDestination struct { // newImageDestination returns an ImageDestination for writing to an existing directory. func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) { + if ref.sourceIndex != -1 { + return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + } var index *imgspecv1.Index if indexExists(ref) { var err error @@ -111,7 +115,7 @@ func (d *ociImageDestination) Close() error { // WARNING: The contents of stream are being verified on the fly. 
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. -func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { +func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (_ private.UploadedBlob, retErr error) { blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") if err != nil { return private.UploadedBlob{}, err @@ -120,7 +124,10 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. explicitClosed := false defer func() { if !explicitClosed { - blobFile.Close() + closeErr := blobFile.Close() + if retErr == nil { + retErr = closeErr + } } if !succeeded { os.Remove(blobFile.Name()) @@ -137,9 +144,21 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. if inputInfo.Size != -1 && size != inputInfo.Size { return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } - if err := blobFile.Sync(); err != nil { + + if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil { return private.UploadedBlob{}, err } + succeeded = true + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil +} + +// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the +// specific blob path determined by the blobDigest. The closed pointer indicates to the caller +// whether blobFile has been closed or not. +func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error { + if err := blobFile.Sync(); err != nil { + return err + } // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. // On Windows, the “permissions of newly created files” argument to syscall.Open is @@ -147,26 +166,30 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // always fails on Windows. 
if runtime.GOOS != "windows" { if err := blobFile.Chmod(0644); err != nil { - return private.UploadedBlob{}, err + return err } } blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir) if err != nil { - return private.UploadedBlob{}, err + return err } if err := ensureParentDirectoryExists(blobPath); err != nil { - return private.UploadedBlob{}, err + return err + } + + // need to explicitly close the file, since a rename won't otherwise work on Windows + err = blobFile.Close() + if err != nil { + return err } + *closed = true - // need to explicitly close the file, since a rename won't otherwise not work on Windows - blobFile.Close() - explicitClosed = true if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return private.UploadedBlob{}, err + return err } - succeeded = true - return private.UploadedBlob{Digest: blobDigest, Size: size}, nil + + return nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -299,6 +322,70 @@ func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options pri return os.WriteFile(d.ref.indexPath(), indexJSON, 0644) } +// PutBlobFromLocalFileOption is unused but may receive functionality in the future. +type PutBlobFromLocalFileOption struct{} + +// PutBlobFromLocalFile arranges for the data at path to be used as a blob, addressed by its digest. +// It computes, and returns, the digest and size of that file. +// +// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called. +func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (_ digest.Digest, _ int64, retErr error) { + d, ok := dest.(*ociImageDestination) + if !ok { + return "", -1, errors.New("caller error: PutBlobFromLocalFile called with a non-oci: destination") + } + + succeeded := false + blobFileClosed := false + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") + if err != nil { + return "", -1, err + } + defer func() { + if !blobFileClosed { + closeErr := blobFile.Close() + if retErr == nil { + retErr = closeErr + } + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + srcFile, err := os.Open(file) + if err != nil { + return "", -1, err + } + defer srcFile.Close() + + err = fileutils.ReflinkOrCopy(srcFile, blobFile) + if err != nil { + return "", -1, err + } + + _, err = blobFile.Seek(0, io.SeekStart) + if err != nil { + return "", -1, err + } + blobDigest, err := digest.FromReader(blobFile) + if err != nil { + return "", -1, err + } + + fileInfo, err := blobFile.Stat() + if err != nil { + return "", -1, err + } + + if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil { + return "", -1, err + } + + succeeded = true + return blobDigest, fileInfo.Size(), nil +} + func ensureDirectoryExists(path string) error { if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) { if err := os.MkdirAll(path, 0755); err != nil { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index adfd2064..49c070c2 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -16,6 +16,7 @@ import ( "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" +
"github.com/containers/storage/pkg/fileutils" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -214,3 +215,26 @@ func getBlobSize(resp *http.Response) int64 { } return size } + +// GetLocalBlobPath returns the local path to the blob file with the given digest. +// The returned path is checked for existence so when a non existing digest is +// given an error will be returned. +// +// Important: The returned path must be treated as read only, writing the file will +// corrupt the oci layout as the digest no longer matches. +func GetLocalBlobPath(ctx context.Context, src types.ImageSource, digest digest.Digest) (string, error) { + s, ok := src.(*ociImageSource) + if !ok { + return "", errors.New("caller error: GetLocalBlobPath called with a non-oci: source") + } + + path, err := s.ref.blobPath(digest, s.sharedBlobDir) + if err != nil { + return "", err + } + if err := fileutils.Exists(path); err != nil { + return "", err + } + + return path, nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 816dfa7a..832f8908 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -12,6 +12,7 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/oci/internal" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" @@ -61,22 +62,31 @@ type ociReference struct { // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) dir string // As specified by the user. May be relative, contain symlinks, etc. resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. - // If image=="", it means the "only image" in the index.json is used in the case it is a source - // for destinations, the image name annotation "image.ref.name" is not added to the index.json + // If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source + // for destinations, the image name annotation "image.ref.name" is not added to the index.json. + // + // Must not be set if sourceIndex is set (the value is not -1). image string + // If not -1, a zero-based index of an image in the manifest index. Valid only for sources. + // Must not be set if image is set. + sourceIndex int } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. func ParseReference(reference string) (types.ImageReference, error) { - dir, image := internal.SplitPathAndImage(reference) - return NewReference(dir, image) + dir, image, index, err := internal.ParseReferenceIntoElements(reference) + if err != nil { + return nil, err + } + return newReference(dir, image, index) } -// NewReference returns an OCI reference for a directory and a image. +// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex. // +// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used. 
// We do not expose an API supplying the resolvedDir; we could, but recomputing it // is generally cheap enough that we prefer being confident about the properties of resolvedDir. -func NewReference(dir, image string) (types.ImageReference, error) { +func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) { resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) if err != nil { return nil, err @@ -90,7 +100,26 @@ func NewReference(dir, image string) (types.ImageReference, error) { return nil, err } - return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil + if sourceIndex != -1 && sourceIndex < 0 { + return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex) + } + if sourceIndex != -1 && image != "" { + return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex) + } + return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil +} + +// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index. +func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) { + if sourceIndex < 0 { + return nil, fmt.Errorf("invalid call to NewIndexReference with negative index %d", sourceIndex) + } + return newReference(dir, "", sourceIndex) +} + +// NewReference returns an OCI reference for a directory and an optional image name annotation (if not ""). +func NewReference(dir, image string) (types.ImageReference, error) { + return newReference(dir, image, -1) } func (ref ociReference) Transport() types.ImageTransport { @@ -103,7 +132,10 @@ func (ref ociReference) Transport() types.ImageTransport { // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref ociReference) StringWithinTransport() string { - return fmt.Sprintf("%s:%s", ref.dir, ref.image) + if ref.sourceIndex == -1 { + return fmt.Sprintf("%s:%s", ref.dir, ref.image) + } + return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex) } // DockerReference returns a Docker reference associated with this reference @@ -187,19 +219,23 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro return imgspecv1.Descriptor{}, -1, err } - if ref.image == "" { - // return manifest if only one image is in the oci directory - if len(index.Manifests) != 1 { - // ask user to choose image when more than one image in the oci directory - return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage + switch { + case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references. 
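NewIndexReference above complements NewReference by addressing a manifest purely by its position in index.json; mixing both selectors is rejected. A brief sketch (the path is hypothetical; an out-of-range index only fails later, when getManifestDescriptor reads the index):

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	// Pick the third manifest of a multi-image layout by position.
	ref, err := layout.NewIndexReference("/tmp/layout", 2) // hypothetical path
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // /tmp/layout:@2

	// Negative indexes are rejected immediately.
	if _, err := layout.NewIndexReference("/tmp/layout", -1); err != nil {
		fmt.Println(err)
	}
}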
+ return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d", + ref.image, ref.sourceIndex) + + case ref.sourceIndex != -1: + if ref.sourceIndex >= len(index.Manifests) { + return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests)) } - return index.Manifests[0], 0, nil - } else { + return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil + + case ref.image != "": // if image specified, look through all manifests for a match var unsupportedMIMETypes []string for i, md := range index.Manifests { if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { - if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { + if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex || md.MediaType == manifest.DockerV2Schema2MediaType || md.MediaType == manifest.DockerV2ListMediaType { return md, i, nil } unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) @@ -208,8 +244,16 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro if len(unsupportedMIMETypes) != 0 { return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) } + return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} + + default: + // return manifest if only one image is in the oci directory + if len(index.Manifests) != 1 { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage + } + return index.Manifests[0], 0, nil } - return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} } // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name diff --git a/vendor/github.com/containers/image/v5/oci/layout/reader.go b/vendor/github.com/containers/image/v5/oci/layout/reader.go new file mode 100644 index 00000000..112db2d7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/reader.go @@ -0,0 +1,52 @@ +package layout + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// This file is named reader.go for consistency with other transports’ +// handling of “image containers”, but we don’t actually need a stateful reader object. 
+ +// ListResult wraps the image reference and the manifest for loading +type ListResult struct { + Reference types.ImageReference + ManifestDescriptor imgspecv1.Descriptor +} + +// List returns a slice of manifests included in the archive +func List(dir string) ([]ListResult, error) { + var res []ListResult + + indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile)) + if err != nil { + return nil, err + } + var index imgspecv1.Index + if err := json.Unmarshal(indexJSON, &index); err != nil { + return nil, err + } + + for manifestIndex, md := range index.Manifests { + refName := md.Annotations[imgspecv1.AnnotationRefName] + index := -1 + if refName == "" { + index = manifestIndex + } + ref, err := newReference(dir, refName, index) + if err != nil { + return nil, fmt.Errorf("error creating image reference: %w", err) + } + reference := ListResult{ + Reference: ref, + ManifestDescriptor: md, + } + res = append(res, reference) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index cef3dccc..0bba3c21 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -571,8 +571,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { // merge all of the struct values in the reverse order so that priority is given correctly // errors are not added to the list the second time nonMapConfig := clientcmdNewConfig() - for i := len(kubeconfigs) - 1; i >= 0; i-- { - kubeconfig := kubeconfigs[i] + for _, kubeconfig := range slices.Backward(kubeconfigs) { if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil { return nil, err } @@ -921,7 +920,7 @@ func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. func tlsConfigFor(c *restConfig) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { + if !c.HasCA() && !c.HasCertAuth() && !c.Insecure { return nil, nil } if c.HasCA() && c.Insecure { diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 61f69e93..bd5e77aa 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -22,6 +22,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) type openshiftImageDestination struct { @@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool { return d.docker.SupportsPutBlobPartial() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). 
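The List helper added in reader.go above enumerates every manifest in a layout, handing back named references for entries with an image.ref.name annotation and positional "@index" references for the rest. A usage sketch (the directory is hypothetical):

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	results, err := layout.List("/tmp/layout") // hypothetical layout directory
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Printf("%s\t%s\t%s\n",
			r.Reference.StringWithinTransport(),
			r.ManifestDescriptor.MediaType,
			r.ManifestDescriptor.Digest)
	}
}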
+func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.docker.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go deleted file mode 100644 index d4ebe413..00000000 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ /dev/null @@ -1,533 +0,0 @@ -//go:build containers_image_ostree -// +build containers_image_ostree - -package ostree - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "io/fs" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "syscall" - "time" - "unsafe" - - "github.com/containers/image/v5/internal/imagedestination/impl" - "github.com/containers/image/v5/internal/imagedestination/stubs" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/fileutils" - "github.com/klauspost/pgzip" - "github.com/opencontainers/go-digest" - selinux "github.com/opencontainers/selinux/go-selinux" - "github.com/ostreedev/ostree-go/pkg/otbuiltin" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -type blobToImport struct { - Size int64 - Digest digest.Digest - BlobPath string -} - -type descriptor struct { - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` -} - -type fsLayersSchema1 struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type manifestSchema struct { - LayersDescriptors []descriptor `json:"layers"` - FSLayers []fsLayersSchema1 `json:"fsLayers"` -} - -type ostreeImageDestination struct { - impl.Compat - impl.PropertyMethodsInitialize - stubs.NoPutBlobPartialInitialize - stubs.AlwaysSupportsSignatures - - ref ostreeReference - manifest string - schema manifestSchema - tmpDirPath string - blobs map[string]*blobToImport - digest digest.Digest - signaturesLen int - repo *C.struct_OstreeRepo -} - -// newImageDestination returns an ImageDestination for writing to an existing ostree. -func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) { - tmpDirPath = filepath.Join(tmpDirPath, ref.branchName) - if err := ensureDirectoryExists(tmpDirPath); err != nil { - return nil, err - } - d := &ostreeImageDestination{ - PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ - SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType}, - DesiredLayerCompression: types.PreserveOriginal, - AcceptsForeignLayerURLs: false, - MustMatchRuntimeOS: true, - IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. 
- HasThreadSafePutBlob: false, - }), - NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), - - ref: ref, - manifest: "", - schema: manifestSchema{}, - tmpDirPath: tmpDirPath, - blobs: map[string]*blobToImport{}, - digest: "", - signaturesLen: 0, - repo: nil, - } - d.Compat = impl.AddCompat(d) - return d, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *ostreeImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *ostreeImageDestination) Close() error { - if d.repo != nil { - C.g_object_unref(C.gpointer(d.repo)) - } - return os.RemoveAll(d.tmpDirPath) -} - -// PutBlobWithOptions writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { - tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob") - if err != nil { - return private.UploadedBlob{}, err - } - - blobPath := filepath.Join(tmpDir, "content") - blobFile, err := os.Create(blobPath) - if err != nil { - return private.UploadedBlob{}, err - } - defer blobFile.Close() - - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - size, err := io.Copy(blobFile, stream) - if err != nil { - return private.UploadedBlob{}, err - } - blobDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return private.UploadedBlob{}, err - } - - hash := blobDigest.Encoded() - d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath} - return private.UploadedBlob{Digest: blobDigest, Size: size}, nil -} - -func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { - entries, err := os.ReadDir(dir) - if err != nil { - return err - } - - for _, entry := range entries { - fullpath := filepath.Join(dir, entry.Name()) - if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { - if err := os.Remove(fullpath); err != nil { - return err - } - continue - } - - info, err := entry.Info() - if err != nil { - return err - } - if selinuxHnd != nil { - relPath, err := filepath.Rel(root, fullpath) - if err != nil { - return err - } - // Handle /exports/hostfs as a special case. 
Files under this directory are copied to the host, - // thus we benefit from maintaining the same SELinux label they would have on the host as we could - // use hard links instead of copying the files. - relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) - - relPathC := C.CString(relPath) - defer C.free(unsafe.Pointer(relPathC)) - var context *C.char - - res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) - if int(res) < 0 && err != syscall.ENOENT { - return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err) - } - if int(res) == 0 { - defer C.freecon(context) - fullpathC := C.CString(fullpath) - defer C.free(unsafe.Pointer(fullpathC)) - res, err = C.lsetfilecon_raw(fullpathC, context) - if int(res) < 0 { - return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err) - } - } - } - - if entry.IsDir() { - if usermode { - if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { - return err - } - } - err = fixFiles(selinuxHnd, root, fullpath, usermode) - if err != nil { - return err - } - } else if usermode && (entry.Type().IsRegular()) { - if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { - return err - } - } - } - - return nil -} - -func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { - opts := otbuiltin.NewCommitOptions() - opts.AddMetadataString = metadata - opts.Timestamp = time.Now() - // OCI layers have no parent OSTree commit - opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" - _, err := repo.Commit(root, branch, opts) - return err -} - -func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { - mfz := pgzip.NewWriter(output) - defer mfz.Close() - metaPacker := storage.NewJSONPacker(mfz) - - stream, err := os.OpenFile(file, os.O_RDONLY, 0) - if err != nil { - return "", -1, err - } - defer stream.Close() - - gzReader, err := archive.DecompressStream(stream) - if err != nil { - return "", -1, err - } - defer gzReader.Close() - - its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) - if err != nil { - return "", -1, err - } - - digester := digest.Canonical.Digester() - - written, err := io.Copy(digester.Hash(), its) - if err != nil { - return "", -1, err - } - - return digester.Digest(), written, nil -} - -func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { - // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
- - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded()) - destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root") - if err := ensureDirectoryExists(destinationPath); err != nil { - return err - } - defer func() { - os.Remove(blob.BlobPath) - os.RemoveAll(destinationPath) - }() - - var tarSplitOutput bytes.Buffer - uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath) - if err != nil { - return err - } - - if os.Getuid() == 0 { - if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { - return err - } - if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil { - return err - } - } else { - os.MkdirAll(destinationPath, 0755) - if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil { - return err - } - - if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil { - return err - } - } - return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), - fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize), - fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()), - fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) - -} - -func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded()) - destinationPath := filepath.Dir(blob.BlobPath) - - return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) -} - -// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { - return false, private.ReusedBlob{}, nil - } - if d.repo == nil { - repo, err := openRepo(d.ref.repo) - if err != nil { - return false, private.ReusedBlob{}, err - } - d.repo = repo - } - - if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. 
- return false, private.ReusedBlob{}, err - } - branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded()) - - found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") - if err != nil || !found { - return found, private.ReusedBlob{}, err - } - - found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size") - if err != nil || !found { - return found, private.ReusedBlob{}, err - } - - found, data, err = readMetadata(d.repo, branch, "docker.size") - if err != nil || !found { - return found, private.ReusedBlob{}, err - } - - size, err := strconv.ParseInt(data, 10, 64) - if err != nil { - return false, private.ReusedBlob{}, err - } - - return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil -} - -// PutManifest writes manifest to the destination. -// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so -// there can be no secondary manifests. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { - if instanceDigest != nil { - return errors.New(`Manifest lists are not supported by "ostree:"`) - } - - d.manifest = string(manifestBlob) - - if err := json.Unmarshal(manifestBlob, &d.schema); err != nil { - return err - } - - manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath()) - if err := ensureParentDirectoryExists(manifestPath); err != nil { - return err - } - - digest, err := manifest.Digest(manifestBlob) - if err != nil { - return err - } - d.digest = digest - - return os.WriteFile(manifestPath, manifestBlob, 0644) -} - -// PutSignaturesWithFormat writes a set of signatures to the destination. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for -// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -// MUST be called after PutManifest (signatures may reference manifest contents). -func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { - if instanceDigest != nil { - return errors.New(`Manifest lists are not supported by "ostree:"`) - } - - path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0)) - if err := ensureParentDirectoryExists(path); err != nil { - return err - } - - for i, sig := range signatures { - signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) - blob, err := signature.Blob(sig) - if err != nil { - return err - } - if err := os.WriteFile(signaturePath, blob, 0644); err != nil { - return err - } - } - d.signaturesLen = len(signatures) - return nil -} - -// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before CommitWithOptions() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. 
rollback is allowed but not guaranteed) -func (d *ostreeImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - repo, err := otbuiltin.OpenRepo(d.ref.repo) - if err != nil { - return err - } - - _, err = repo.PrepareTransaction() - if err != nil { - return err - } - - var selinuxHnd *C.struct_selabel_handle - - if os.Getuid() == 0 && selinux.GetEnabled() { - selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0) - if selinuxHnd == nil { - return fmt.Errorf("cannot open the SELinux DB: %w", err) - } - - defer C.selabel_close(selinuxHnd) - } - - checkLayer := func(hash string) error { - blob := d.blobs[hash] - // if the blob is not present in d.blobs then it is already stored in OSTree, - // and we don't need to import it. - if blob == nil { - return nil - } - err := d.importBlob(selinuxHnd, repo, blob) - if err != nil { - return err - } - - delete(d.blobs, hash) - return nil - } - for _, layer := range d.schema.LayersDescriptors { - if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. - return err - } - hash := layer.Digest.Encoded() - if err = checkLayer(hash); err != nil { - return err - } - } - for _, layer := range d.schema.FSLayers { - if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. - return err - } - hash := layer.BlobSum.Encoded() - if err = checkLayer(hash); err != nil { - return err - } - } - - // Import the other blobs that are not layers - for _, blob := range d.blobs { - err := d.importConfig(repo, blob) - if err != nil { - return err - } - } - - manifestPath := filepath.Join(d.tmpDirPath, "manifest") - - metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), - fmt.Sprintf("signatures=%d", d.signaturesLen), - fmt.Sprintf("docker.digest=%s", string(d.digest))} - if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil { - return err - } - - _, err = repo.CommitTransaction() - return err -} - -func ensureDirectoryExists(path string) error { - if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - } - return nil -} - -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) -} diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go deleted file mode 100644 index 0b597ce2..00000000 --- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go +++ /dev/null @@ -1,455 +0,0 @@ -//go:build containers_image_ostree -// +build containers_image_ostree - -package ostree - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "strconv" - "strings" - "unsafe" - - "github.com/containers/image/v5/internal/imagesource/impl" - "github.com/containers/image/v5/internal/imagesource/stubs" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/ioutils" - "github.com/klauspost/pgzip" - digest "github.com/opencontainers/go-digest" - glib "github.com/ostreedev/ostree-go/pkg/glibobject" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - 
-// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -type ostreeImageSource struct { - impl.Compat - impl.PropertyMethodsInitialize - stubs.NoGetBlobAtInitialize - - ref ostreeReference - tmpDir string - repo *C.struct_OstreeRepo - // get the compressed layer by its uncompressed checksum - compressed map[digest.Digest]digest.Digest -} - -// newImageSource returns an ImageSource for reading from an existing directory. -func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) { - s := &ostreeImageSource{ - PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ - HasThreadSafeGetBlob: false, - }), - NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), - - ref: ref, - tmpDir: tmpDir, - compressed: nil, - } - s.Compat = impl.AddCompat(s) - return s, nil -} - -// Reference returns the reference used to set up this source. -func (s *ostreeImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *ostreeImageSource) Close() error { - if s.repo != nil { - C.g_object_unref(C.gpointer(s.repo)) - } - return nil -} - -func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) { - var metadataKey string - if isCompressed { - metadataKey = "docker.uncompressed_size" - } else { - metadataKey = "docker.size" - } - b := fmt.Sprintf("ociimage/%s", blob) - found, data, err := readMetadata(s.repo, b, metadataKey) - if err != nil || !found { - return 0, err - } - return strconv.ParseInt(data, 10, 64) -} - -func (s *ostreeImageSource) getLenSignatures() (int64, error) { - b := fmt.Sprintf("ociimage/%s", s.ref.branchName) - found, data, err := readMetadata(s.repo, b, "signatures") - if err != nil { - return -1, err - } - if !found { - // if 'signatures' is not present, just return 0 signatures. - return 0, nil - } - return strconv.ParseInt(data, 10, 64) -} - -func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { - b := fmt.Sprintf("ociimage/%s", blob) - found, out, err := readMetadata(s.repo, b, "tarsplit.output") - if err != nil || !found { - return nil, err - } - return base64.StdEncoding.DecodeString(out) -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, -// as the primary manifest can not be a list, so there can be non-default instances. 
-func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`) - } - if s.repo == nil { - repo, err := openRepo(s.ref.repo) - if err != nil { - return nil, "", err - } - s.repo = repo - } - - b := fmt.Sprintf("ociimage/%s", s.ref.branchName) - found, out, err := readMetadata(s.repo, b, "docker.manifest") - if err != nil { - return nil, "", err - } - if !found { - return nil, "", errors.New("manifest not found") - } - m := []byte(out) - return m, manifest.GuessMIMEType(m), nil -} - -func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return nil, "", errors.New("manifest lists are not supported by this transport") -} - -func openRepo(path string) (*C.struct_OstreeRepo, error) { - var cerr *C.GError - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - file := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(file)) - repo := C.ostree_repo_new(file) - r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) - if !r { - C.g_object_unref(C.gpointer(repo)) - return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - return repo, nil -} - -type ostreePathFileGetter struct { - repo *C.struct_OstreeRepo - parentRoot *C.GFile -} - -type ostreeReader struct { - stream *C.GFileInputStream -} - -func (o ostreeReader) Close() error { - C.g_object_unref(C.gpointer(o.stream)) - return nil -} -func (o ostreeReader) Read(p []byte) (int, error) { - var cerr *C.GError - instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) - stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) - - b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) - if b == nil { - return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - defer C.g_bytes_unref(b) - - count := int(C.g_bytes_get_size(b)) - if count == 0 { - return 0, io.EOF - } - data := unsafe.Slice((*byte)(C.g_bytes_get_data(b, nil)), count) - copy(p, data) - return count, nil -} - -func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { - var cerr *C.GError - var ref *C.char - defer C.free(unsafe.Pointer(ref)) - - cCommit := C.CString(commit) - defer C.free(unsafe.Pointer(cCommit)) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { - return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - - if ref == nil { - return false, "", nil - } - - var variant *C.GVariant - if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { - return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - defer C.g_variant_unref(variant) - if variant != nil { - cKey := C.CString(key) - defer C.free(unsafe.Pointer(cKey)) - - metadata := C.g_variant_get_child_value(variant, 0) - defer C.g_variant_unref(metadata) - - data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) - if data != nil { - defer C.g_variant_unref(data) - ptr := (*C.char)(C.g_variant_get_string(data, nil)) - val := C.GoString(ptr) - return true, val, nil - } - } - return false, "", nil -} - -func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { - var cerr *C.GError - var parentRoot *C.GFile - cCommit := C.CString(commit) - 
defer C.free(unsafe.Pointer(cCommit)) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) { - return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - - C.g_object_ref(C.gpointer(repo)) - - return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil -} - -func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) { - var file *C.GFile - if strings.HasPrefix(filename, "./") { - filename = filename[2:] - } - cfilename := C.CString(filename) - defer C.free(unsafe.Pointer(cfilename)) - - file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename)) - - var cerr *C.GError - stream := C.g_file_read(file, nil, &cerr) - if stream == nil { - return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - - return &ostreeReader{stream: stream}, nil -} - -func (o ostreePathFileGetter) Close() { - C.g_object_unref(C.gpointer(o.repo)) - C.g_object_unref(C.gpointer(o.parentRoot)) -} - -func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) { - getter, err := newOSTreePathFileGetter(s.repo, commit) - if err != nil { - return nil, err - } - defer getter.Close() - - return getter.Get(path) -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. - return nil, -1, err - } - blob := info.Digest.Encoded() - - // Ensure s.compressed is initialized. It is build by LayerInfosForCopy. - if s.compressed == nil { - _, err := s.LayerInfosForCopy(ctx, nil) - if err != nil { - return nil, -1, err - } - - } - compressedBlob, isCompressed := s.compressed[info.Digest] - if isCompressed { - blob = compressedBlob.Encoded() - } - branch := fmt.Sprintf("ociimage/%s", blob) - - if s.repo == nil { - repo, err := openRepo(s.ref.repo) - if err != nil { - return nil, 0, err - } - s.repo = repo - } - - layerSize, err := s.getBlobUncompressedSize(blob, isCompressed) - if err != nil { - return nil, 0, err - } - - tarsplit, err := s.getTarSplitData(blob) - if err != nil { - return nil, 0, err - } - - // if tarsplit is nil we are looking at the manifest. Return directly the file in /content - if tarsplit == nil { - file, err := s.readSingleFile(branch, "/content") - if err != nil { - return nil, 0, err - } - return file, layerSize, nil - } - - mf := bytes.NewReader(tarsplit) - mfz, err := pgzip.NewReader(mf) - if err != nil { - return nil, 0, err - } - metaUnpacker := storage.NewJSONUnpacker(mfz) - - getter, err := newOSTreePathFileGetter(s.repo, branch) - if err != nil { - mfz.Close() - return nil, 0, err - } - - ots := asm.NewOutputTarStream(getter, metaUnpacker) - - rc := ioutils.NewReadCloserWrapper(ots, func() error { - getter.Close() - mfz.Close() - return ots.Close() - }) - return rc, layerSize, nil -} - -// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. 
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { - if instanceDigest != nil { - return nil, errors.New(`Manifest lists are not supported by "ostree:"`) - } - lenSignatures, err := s.getLenSignatures() - if err != nil { - return nil, err - } - branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) - - if s.repo == nil { - repo, err := openRepo(s.ref.repo) - if err != nil { - return nil, err - } - s.repo = repo - } - - signatures := []signature.Signature{} - for i := int64(1); i <= lenSignatures; i++ { - path := fmt.Sprintf("/signature-%d", i) - sigReader, err := s.readSingleFile(branch, path) - if err != nil { - return nil, err - } - defer sigReader.Close() - - sigBlob, err := io.ReadAll(sigReader) - if err != nil { - return nil, err - } - sig, err := signature.FromBlob(sigBlob) - if err != nil { - return nil, fmt.Errorf("parsing signature %q: %w", path, err) - } - signatures = append(signatures, sig) - } - return signatures, nil -} - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, -// as the primary manifest can not be a list, so there can be secondary manifests. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. 
-func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - if instanceDigest != nil { - return nil, errors.New(`Manifest lists are not supported by "ostree:"`) - } - - updatedBlobInfos := []types.BlobInfo{} - manifestBlob, manifestType, err := s.GetManifest(ctx, nil) - if err != nil { - return nil, err - } - - man, err := manifest.FromBlob(manifestBlob, manifestType) - - s.compressed = make(map[digest.Digest]digest.Digest) - - layerBlobs := man.LayerInfos() - - for _, layerBlob := range layerBlobs { - branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded()) - found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest") - if err != nil || !found { - return nil, err - } - - found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size") - if err != nil || !found { - return nil, err - } - - uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64) - if err != nil { - return nil, err - } - uncompressedDigest, err := digest.Parse(uncompressedDigestStr) - if err != nil { - return nil, err - } - blobInfo := types.BlobInfo{ - Digest: uncompressedDigest, - Size: uncompressedSize, - MediaType: layerBlob.MediaType, - } - s.compressed[uncompressedDigest] = layerBlob.Digest - updatedBlobInfos = append(updatedBlobInfos, blobInfo) - } - return updatedBlobInfos, nil -} diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go deleted file mode 100644 index d83f85b9..00000000 --- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go +++ /dev/null @@ -1,242 +0,0 @@ -//go:build containers_image_ostree -// +build containers_image_ostree - -package ostree - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/v5/directory/explicitfilepath" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/image" - "github.com/containers/image/v5/transports" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/regexp" -) - -const defaultOSTreeRepo = "/ostree/repo" - -// Transport is an ImageTransport for ostree paths. -var Transport = ostreeTransport{} - -type ostreeTransport struct{} - -func (t ostreeTransport) Name() string { - return "ostree" -} - -func init() { - transports.Register(Transport) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error { - sep := strings.Index(scope, ":") - if sep < 0 { - return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope) - } - repo := scope[:sep] - - if !strings.HasPrefix(repo, "/") { - return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) - } - cleaned := filepath.Clean(repo) - if cleaned != repo { - return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - - // FIXME? 
In the namespaces within a repo, - // we could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} - -// ostreeReference is an ImageReference for ostree paths. -type ostreeReference struct { - image string - branchName string - repo string -} - -type ostreeImageCloser struct { - types.ImageCloser - size int64 -} - -func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { - var repo = "" - image, repoPart, gotRepoPart := strings.Cut(ref, "@/") - if !gotRepoPart { - repo = defaultOSTreeRepo - } else { - repo = "/" + repoPart - } - - return NewReference(image, repo) -} - -// NewReference returns an OSTree reference for a specified repo and image. -func NewReference(image string, repo string) (types.ImageReference, error) { - // image is not _really_ in a containers/image/docker/reference format; - // as far as the libOSTree ociimage/* namespace is concerned, it is more or - // less an arbitrary string with an implied tag. - // Parse the image using reference.ParseNormalizedNamed so that we can - // check whether the images has a tag specified and we can add ":latest" if needed - ostreeImage, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, err - } - - if reference.IsNameOnly(ostreeImage) { - image = image + ":latest" - } - - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo) - if err != nil { - // With os.IsNotExist(err), the parent directory of repo is also not existent; - // that should ordinarily not happen, but it would be a bit weird to reject - // references which do not specify a repo just because the implicit defaultOSTreeRepo - // does not exist. - if os.IsNotExist(err) && repo == defaultOSTreeRepo { - resolved = repo - } else { - return nil, err - } - } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. - if strings.Contains(resolved, ":") { - return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) - } - - return ostreeReference{ - image: image, - branchName: encodeOStreeRef(image), - repo: resolved, - }, nil -} - -func (ref ostreeReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref ostreeReference) StringWithinTransport() string { - return fmt.Sprintf("%s@%s", ref.image, ref.repo) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. 
-func (ref ostreeReference) DockerReference() reference.Named { - return nil -} - -func (ref ostreeReference) PolicyConfigurationIdentity() string { - return fmt.Sprintf("%s:%s", ref.repo, ref.image) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref ostreeReference) PolicyConfigurationNamespaces() []string { - repo, _, gotTag := strings.Cut(ref.image, ":") - if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. - panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) - } - name := repo - res := []string{} - for { - res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - return res -} - -func (s *ostreeImageCloser) Size() (int64, error) { - return s.size, nil -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - return image.FromReference(ctx, sys, ref) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - var tmpDir string - if sys == nil || sys.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = sys.OSTreeTmpDirPath - } - return newImageSource(tmpDir, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - var tmpDir string - if sys == nil || sys.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = sys.OSTreeTmpDirPath - } - return newImageDestination(ref, tmpDir) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.New("Deleting images not implemented for ostree: images") -} - -var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`) - -func encodeOStreeRef(in string) string { - var buffer bytes.Buffer - for i := range in { - sub := in[i : i+1] - if ostreeRefRegexp.MatchString(sub) { - buffer.WriteString(sub) - } else { - buffer.WriteString(fmt.Sprintf("_%02X", sub[0])) - } - - } - return buffer.String() -} - -// manifestPath returns a path for the manifest within a ostree using our conventions. 
-func (ref ostreeReference) manifestPath() string { - return filepath.Join("manifest", "manifest.json") -} - -// signaturePath returns a path for a signature within a ostree using our conventions. -func (ref ostreeReference) signaturePath(index int) string { - return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1)) -} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 9d4125d6..8e513d41 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -240,7 +240,7 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map if otherDigests != nil { - for _, d := range otherDigests.Values() { + for d := range otherDigests.All() { if d != primaryDigest && d != uncompressedDigest { res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options) } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go index 1a793102..719c8eda 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go @@ -87,14 +87,20 @@ func new2(path string) (*cache, error) { if err != nil { return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err) } - defer db.Close() - - // We don’t check the schema before every operation, because that would be costly - // and because we assume schema changes will be handled by using a different path. - if err := ensureDBHasCurrentSchema(db); err != nil { + err = func() (retErr error) { // A scope for defer + defer func() { + closeErr := db.Close() + if retErr == nil { + retErr = closeErr + } + }() + // We don’t check the schema before every operation, because that would be costly + // and because we assume schema changes will be handled by using a different path. + return ensureDBHasCurrentSchema(db) + }() + if err != nil { return nil, err } - return &cache{ path: path, refCount: 0, @@ -147,25 +153,30 @@ func (sqc *cache) Close() { type void struct{} // So that we don’t have to write struct{}{} all over the place // transaction calls fn within a read-write transaction in sqc. 
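The new2() change above and the transaction() helper below share one discipline: the error from a deferred Close() must not be dropped, because for writable handles Close() can be the first place a write failure surfaces. A minimal, self-contained sketch of the pattern, assuming nothing beyond the standard library (the function name and path are illustrative, not c/image code):

package main

import "os"

// writeConfig shows the named-return pattern this patch applies to *sql.DB
// and *os.File handles: the deferred Close() error is kept unless an earlier
// error already takes precedence.
func writeConfig(path string, data []byte) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		// For files opened for writing, Close() can surface a real
		// write-back error, so it must not be silently discarded.
		closeErr := f.Close()
		if retErr == nil {
			retErr = closeErr
		}
	}()
	_, err = f.Write(data)
	return err
}

func main() {
	if err := writeConfig("/tmp/example.conf", []byte("x = 1\n")); err != nil {
		panic(err)
	}
}

The transaction() helper below applies the same idea to a lazily opened *sql.DB, with the added twist that the no-op close path returns a nil error of the same func() error shape.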
-func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) { - db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer +func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) { + db, closeDB, err := func() (*sql.DB, func() error, error) { // A scope for defer sqc.lock.Lock() defer sqc.lock.Unlock() if sqc.db != nil { - return sqc.db, func() {}, nil + return sqc.db, func() error { return nil }, nil } db, err := rawOpen(sqc.path) if err != nil { return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err) } - return db, func() { db.Close() }, nil + return db, db.Close, nil }() if err != nil { var zeroRes T // A zero value of T return zeroRes, err } - defer closeDB() + defer func() { + closeErr := closeDB() + if retErr == nil { + retErr = closeErr + } + }() return dbTransaction(db, fn) } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index da2238a0..243b13c8 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" "io/fs" + "iter" + "maps" "os" "os/exec" "path/filepath" @@ -93,9 +95,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Credential helpers in the auth file have a // direct mapping to a registry, so we can just // walk the map. - for registry := range fileContents.CredHelpers { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(fileContents.CredHelpers)) for key := range fileContents.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { @@ -115,16 +115,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon return nil, err } } - for registry := range creds { - allKeys.Add(registry) - } + allKeys.AddSeq(maps.Keys(creds)) } } // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. allCreds := make(map[string]types.DockerAuthConfig) - for _, key := range allKeys.Values() { + for key := range allKeys.All() { creds, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`. @@ -818,16 +816,10 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut // Support sub-registry namespaces in auth. // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) - var keys []string - if !path.legacyFormat { - keys = authKeysForKey(key) - } else { - keys = []string{registry} - } - + // // Repo or namespace keys are only supported as exact matches. For registry // keys we prefer exact matches as well. - for _, key := range keys { + for key := range authKeyLookupOrder(key, registry, path.legacyFormat) { if val, exists := fileContents.AuthConfigs[key]; exists { return decodeDockerAuth(path.path, key, val) } @@ -854,25 +846,33 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut return types.DockerAuthConfig{}, nil } -// authKeysForKey returns the keys matching a provided auth file key, in order -// from the best match to worst. For example, +// authKeyLookupOrder returns a sequence for lookup keys matching (key or registry) +// in file with legacyFormat, in order from the best match to worst. 
+// For example, in a non-legacy file, // when given a repository key "quay.io/repo/ns/image", it returns // - quay.io/repo/ns/image // - quay.io/repo/ns // - quay.io/repo // - quay.io -func authKeysForKey(key string) (res []string) { - for { - res = append(res, key) +func authKeyLookupOrder(key, registry string, legacyFormat bool) iter.Seq[string] { + return func(yield func(string) bool) { + if legacyFormat { + _ = yield(registry) // We stop in any case + return + } + + for { + if !yield(key) { + return + } - lastSlash := strings.LastIndex(key, "/") - if lastSlash == -1 { - break + lastSlash := strings.LastIndex(key, "/") + if lastSlash == -1 { + break + } + key = key[:lastSlash] } - key = key[:lastSlash] } - - return res } // decodeDockerAuth decodes the username and password from conf, diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go index 07fe5029..c9e8ac5c 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package sysregistriesv2 diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go index 741b99f8..7dada4b7 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package sysregistriesv2 diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 71f5bc83..677629c5 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -134,7 +134,7 @@ func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Nam // editShortNameAlias loads the aliases.conf file and changes it. If value is // set, it adds the name-value pair as a new alias. Otherwise, it will remove // name from the config. 
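Returning an iter.Seq[string] instead of a []string lets findCredentialsInFile stop at the first matching key without materializing the rest of the sequence. A standalone sketch of the same shape under Go 1.23 range-over-func semantics; prefixes here is a hypothetical stand-in, not c/image API:

package main

import (
	"fmt"
	"iter"
	"strings"
)

// prefixes yields s and each successively shorter "/"-delimited prefix,
// mirroring the non-legacy branch of authKeyLookupOrder above.
func prefixes(s string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for {
			if !yield(s) {
				return // the consumer broke out of its range loop
			}
			i := strings.LastIndex(s, "/")
			if i == -1 {
				return
			}
			s = s[:i]
		}
	}
}

func main() {
	for p := range prefixes("quay.io/repo/ns/image") {
		fmt.Println(p)
		if p == "quay.io/repo" {
			break // reaches the generator as yield returning false
		}
	}
	// Prints: quay.io/repo/ns/image, quay.io/repo/ns, quay.io/repo
}

An early break in the consumer is exactly why the patched generator checks the result of every yield call before continuing.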
-func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error { +func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) { if err := validateShortName(name); err != nil { return err } @@ -178,7 +178,13 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er if err != nil { return err } - defer f.Close() + // since we are writing to this file, make sure we handle err on Close() + defer func() { + closeErr := f.Close() + if retErr == nil { + retErr = closeErr + } + }() encoder := toml.NewEncoder(f) return encoder.Encode(conf) @@ -229,7 +235,7 @@ func parseShortNameValue(alias string) (reference.Named, error) { } registry := reference.Domain(named) - if !(strings.ContainsAny(registry, ".:") || registry == "localhost") { + if !strings.ContainsAny(registry, ".:") && registry != "localhost" { return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 1b161474..318988f0 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -1,11 +1,14 @@ package sysregistriesv2 import ( + "errors" "fmt" "io/fs" + "maps" "os" "path/filepath" "reflect" + "slices" "sort" "strings" "sync" @@ -17,7 +20,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" ) // systemRegistriesConfPath is the path to the system-wide registry @@ -429,7 +431,8 @@ func (config *V2RegistriesConf) postProcessRegistries() error { return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix) } // make sure mirrors are valid - for _, mir := range reg.Mirrors { + for j := range reg.Mirrors { + mir := &reg.Mirrors[j] mir.Location, err = parseLocation(mir.Location) if err != nil { return err @@ -744,6 +747,11 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC // Enforce v2 format for drop-in-configs. dropIn, err := loadConfigFile(path, true) if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // file must have been removed between the directory listing + // and the open call, ignore that as it is an expected race + continue + } return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err) } config.updateWithConfigurationFrom(dropIn) @@ -1034,12 +1042,10 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { } // Go maps have a non-deterministic order when iterating the keys, so - // we dump them in a slice and sort it to enforce some order in - // Registries slice. Some consumers of c/image (e.g., CRI-O) log the - // configuration where a non-deterministic order could easily cause - // confusion. + // we sort the keys to enforce some order in Registries slice. + // Some consumers of c/image (e.g., CRI-O) log the configuration + // and a non-deterministic order could easily cause confusion.
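The line that follows collapses the old dump-keys-then-sort dance into a single expression. For reference, a self-contained sketch of the stdlib maps/slices combination that replaces golang.org/x/exp/maps here; the map contents are made up:

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	registryMap := map[string]int{"b": 2, "a": 1, "c": 3} // illustrative
	// maps.Keys returns an iter.Seq[string]; slices.Sorted drains and sorts
	// it, replacing the former x/exp maps.Keys + sort.Strings two-step.
	for _, k := range slices.Sorted(maps.Keys(registryMap)) {
		fmt.Println(k, registryMap[k]) // a 1, b 2, c 3 (stable across runs)
	}
}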
+ prefixes := slices.Sorted(maps.Keys(registryMap)) c.partialV2.Registries = []Registry{} for _, prefix := range prefixes { diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index f6c0576e..4e0ee57e 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -3,6 +3,7 @@ package tlsclientconfig import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "net" "net/http" @@ -36,12 +37,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { logrus.Debugf(" crt: %s", fullPath) data, err := os.ReadFile(fullPath) if err != nil { - if os.IsNotExist(err) { - // Dangling symbolic link? - // Race with someone who deleted the - // file after we read the directory's - // list of contents? - logrus.Warnf("error reading certificate %q: %v", fullPath, err) + if errors.Is(err, os.ErrNotExist) { + // file must have been removed between the directory listing + // and the open call, ignore that as it is an expected race continue } return err diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go index 70758ad4..43937b63 100644 --- a/vendor/github.com/containers/image/v5/sif/load.go +++ b/vendor/github.com/containers/image/v5/sif/load.go @@ -186,12 +186,18 @@ func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir // has an -o option that allows extracting a squashfs from the SIF file directly, // but that version is not currently available in RHEL 8. logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath) - if err := func() error { // A scope for defer + if err := func() (retErr error) { // A scope for defer f, err := os.Create(squashFSPath) if err != nil { return err } - defer f.Close() + // since we are writing to this file, make sure we handle err on Close() + defer func() { + closeErr := f.Close() + if retErr == nil { + retErr = closeErr + } + }() // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil { return err diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index 31dfdd34..908dd0fd 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -1,6 +1,3 @@ -//go:build !containers_image_fulcio_stub -// +build !containers_image_fulcio_stub - package signature import ( @@ -108,19 +105,10 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, } } - untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes) + untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes) if err != nil { - return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err)) - } - switch len(untrustedLeafCerts) { - case 0: - return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data") - case 1: - break // OK - default: - return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data") + return nil, err } - untrustedCertificate := untrustedLeafCerts[0] // Go rejects Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses and URIs; // we match SAN ourselves, so override that. @@ -195,6 +183,21 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, return untrustedCertificate.PublicKey, nil } +func parseLeafCertFromPEM(untrustedCertificateBytes []byte) (*x509.Certificate, error) { + untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err)) + } + switch len(untrustedLeafCerts) { + case 0: + return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data") + case 1: // OK + return untrustedLeafCerts[0], nil + default: + return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data") + } +} + func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, untrustedPayloadBytes []byte) (crypto.PublicKey, error) { diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go deleted file mode 100644 index c0b48daf..00000000 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build containers_image_fulcio_stub -// +build containers_image_fulcio_stub - -package signature - -import ( - "crypto" - "crypto/ecdsa" - "crypto/x509" - "errors" -) - -type fulcioTrustRoot struct { - caCertificates *x509.CertPool - oidcIssuer string - subjectEmail string -} - -func (f *fulcioTrustRoot) validate() error { - return errors.New("fulcio disabled at compile-time") -} - -func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, - untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, - untrustedPayloadBytes []byte) (crypto.PublicKey, error) { - return nil, errors.New("fulcio disabled at 
compile-time") - -} diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go index 7872f0f4..d21e8544 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/errors.go +++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go @@ -13,3 +13,12 @@ func (err InvalidSignatureError) Error() string { func NewInvalidSignatureError(msg string) InvalidSignatureError { return InvalidSignatureError{msg: msg} } + +// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError. +// All other errors are returned as is. +func JSONFormatToInvalidSignatureError(err error) error { + if formatErr, ok := err.(JSONFormatError); ok { + err = NewInvalidSignatureError(formatErr.Error()) + } + return err +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_api_types.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_api_types.go new file mode 100644 index 00000000..7b941f53 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_api_types.go @@ -0,0 +1,95 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" +) + +const rekorHashedrekordKind = "hashedrekord" + +type RekorHashedrekord struct { + APIVersion *string `json:"apiVersion"` + Spec json.RawMessage `json:"spec"` +} + +func (m *RekorHashedrekord) Kind() string { + return rekorHashedrekordKind +} + +func (m *RekorHashedrekord) SetKind(val string) { +} + +func (m *RekorHashedrekord) UnmarshalJSON(raw []byte) error { + var base struct { + Kind string `json:"kind"` + } + dec := json.NewDecoder(bytes.NewReader(raw)) + dec.UseNumber() + if err := dec.Decode(&base); err != nil { + return err + } + + switch base.Kind { + case rekorHashedrekordKind: + var data struct { // We can’t use RekorHashedRekord directly, because that would be an infinite recursion. 
+ APIVersion *string `json:"apiVersion"` + Spec json.RawMessage `json:"spec"` + } + dec = json.NewDecoder(bytes.NewReader(raw)) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { + return err + } + res := RekorHashedrekord{ + APIVersion: data.APIVersion, + Spec: data.Spec, + } + *m = res + return nil + + default: + return fmt.Errorf("invalid kind value: %q", base.Kind) + } +} + +func (m RekorHashedrekord) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Kind string `json:"kind"` + APIVersion *string `json:"apiVersion"` + Spec json.RawMessage `json:"spec"` + }{ + Kind: m.Kind(), + APIVersion: m.APIVersion, + Spec: m.Spec, + }) +} + +type RekorHashedrekordV001Schema struct { + Data *RekorHashedrekordV001SchemaData `json:"data"` + Signature *RekorHashedrekordV001SchemaSignature `json:"signature"` +} + +type RekorHashedrekordV001SchemaData struct { + Hash *RekorHashedrekordV001SchemaDataHash `json:"hash,omitempty"` +} + +type RekorHashedrekordV001SchemaDataHash struct { + Algorithm *string `json:"algorithm"` + Value *string `json:"value"` +} + +const ( + RekorHashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" + RekorHashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" + RekorHashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" +) + +type RekorHashedrekordV001SchemaSignature struct { + Content []byte `json:"content,omitempty"` + PublicKey *RekorHashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` +} + +type RekorHashedrekordV001SchemaSignaturePublicKey struct { + Content []byte `json:"content,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index bddaca69..1c20e496 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -1,6 +1,3 @@ -//go:build !containers_image_rekor_stub -// +build !containers_image_rekor_stub - package internal import ( @@ -15,12 +12,11 @@ import ( "time" "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" - "github.com/sigstore/rekor/pkg/generated/models" ) // This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema. // We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies. -const HashedRekordV001APIVersion = "0.0.1" +const RekorHashedRekordV001APIVersion = "0.0.1" // UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET // (note that this a signature-specific format, not a format directly used by the Rekor API). @@ -40,15 +36,6 @@ type UntrustedRekorPayload struct { // A compile-time check that UntrustedRekorSET implements json.Unmarshaler var _ json.Unmarshaler = (*UntrustedRekorSET)(nil) -// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError. -// All other errors are returned as is. 
-func JSONFormatToInvalidSignatureError(err error) error { - if formatErr, ok := err.(JSONFormatError); ok { - err = NewInvalidSignatureError(formatErr.Error()) - } - return err -} - // UnmarshalJSON implements the json.Unmarshaler interface func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data)) } @@ -147,31 +134,20 @@ func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, un if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil { return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error())) } - // FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal - // hashedRekor.Spec and so on. - // Especially if we anticipate needing to decode different data formats… - // That would also allow being much more strict about JSON. - // - // Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place. - var hashedRekord models.Hashedrekord + // FIXME: Consider being much more strict about decoding JSON. + var hashedRekord RekorHashedrekord if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil { return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err)) } - // The decode of models.HashedRekord validates the "kind": "hashedrecord" field, which is otherwise invisible to us. + // The decode of HashedRekord validates the "kind": "hashedrekord" field, which is otherwise invisible to us. if hashedRekord.APIVersion == nil { return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version") } - if *hashedRekord.APIVersion != HashedRekordV001APIVersion { + if *hashedRekord.APIVersion != RekorHashedRekordV001APIVersion { return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion)) } - hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec) - if err != nil { - // Coverage: hashedRekord.Spec is an any that was just unmarshaled, - // so this should never fail. - return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err)) - } - var hashedRekordV001 models.HashedrekordV001Schema - if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil { + var hashedRekordV001 RekorHashedrekordV001Schema + if err := json.Unmarshal(hashedRekord.Spec, &hashedRekordV001); err != nil { return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekord spec: %v", err)) } @@ -227,7 +203,7 @@ func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, un // Eventually we should support them as well. // Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility // issue.
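Keeping Spec as json.RawMessage is what eliminates the re-marshal round-trip the removed FIXME complained about: the kind discriminator is decoded first, and the version-specific schema is then unmarshaled directly from the same bytes. A self-contained sketch of that two-pass decode, using an illustrative payload rather than real Rekor data:

package main

import (
	"encoding/json"
	"fmt"
)

// envelope mirrors the shape of RekorHashedrekord above: the discriminator is
// read eagerly while the spec stays opaque until the kind is known.
type envelope struct {
	Kind string          `json:"kind"`
	Spec json.RawMessage `json:"spec"`
}

func main() {
	raw := []byte(`{"kind":"hashedrekord","spec":{"data":{"hash":{"algorithm":"sha256","value":"abc"}}}}`)
	var e envelope
	if err := json.Unmarshal(raw, &e); err != nil {
		panic(err)
	}
	if e.Kind != "hashedrekord" {
		panic(fmt.Sprintf("invalid kind value: %q", e.Kind))
	}
	var spec struct {
		Data struct {
			Hash struct {
				Algorithm string `json:"algorithm"`
				Value     string `json:"value"`
			} `json:"hash"`
		} `json:"data"`
	}
	// Second pass: decode the raw spec bytes for the now-known kind.
	if err := json.Unmarshal(e.Spec, &spec); err != nil {
		panic(err)
	}
	fmt.Println(spec.Data.Hash.Algorithm, spec.Data.Hash.Value)
}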
- if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 { + if *hashedRekordV001.Data.Hash.Algorithm != RekorHashedrekordV001SchemaDataHashAlgorithmSha256 { return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm)) } if hashedRekordV001.Data.Hash.Value == nil { diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go deleted file mode 100644 index 7c121cc2..00000000 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build containers_image_rekor_stub -// +build containers_image_rekor_stub - -package internal - -import ( - "crypto/ecdsa" - "time" -) - -// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. -// Returns bundle upload time on success. -func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { - return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") -} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go index 2b2a7ad8..8a8c5878 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -1,5 +1,4 @@ //go:build !containers_image_openpgp -// +build !containers_image_openpgp package signature diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index bd1979ae..fea8590e 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -1,5 +1,4 @@ //go:build containers_image_openpgp -// +build containers_image_openpgp package signature diff --git a/vendor/github.com/containers/image/v5/signature/pki_cert.go b/vendor/github.com/containers/image/v5/signature/pki_cert.go new file mode 100644 index 00000000..20624540 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/pki_cert.go @@ -0,0 +1,74 @@ +package signature + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "slices" + + "github.com/containers/image/v5/signature/internal" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +type pkiTrustRoot struct { + caRootsCertificates *x509.CertPool + caIntermediateCertificates *x509.CertPool + subjectEmail string + subjectHostname string +} + +func (p *pkiTrustRoot) validate() error { + if p.subjectEmail == "" && p.subjectHostname == "" { + return errors.New("Internal inconsistency: PKI use set up without subject email or subject hostname") + } + return nil +} + +func verifyPKI(pkiTrustRoot *pkiTrustRoot, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) { + var untrustedIntermediatePool *x509.CertPool + if pkiTrustRoot.caIntermediateCertificates != nil { + untrustedIntermediatePool = pkiTrustRoot.caIntermediateCertificates.Clone() + } else { + untrustedIntermediatePool = x509.NewCertPool() + } + if len(untrustedIntermediateChainBytes) > 0 { + untrustedIntermediateChain, err := 
cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err)) + } + if len(untrustedIntermediateChain) > 1 { + for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] { + untrustedIntermediatePool.AddCert(untrustedIntermediateCert) + } + } + } + + untrustedCertificate, err := parseLeafCertFromPEM(untrustedCertificateBytes) + if err != nil { + return nil, err + } + + if _, err := untrustedCertificate.Verify(x509.VerifyOptions{ + Intermediates: untrustedIntermediatePool, + Roots: pkiTrustRoot.caRootsCertificates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning}, + }); err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err)) + } + + if pkiTrustRoot.subjectEmail != "" { + if !slices.Contains(untrustedCertificate.EmailAddresses, pkiTrustRoot.subjectEmail) { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)", + pkiTrustRoot.subjectEmail, + untrustedCertificate.EmailAddresses)) + } + } + if pkiTrustRoot.subjectHostname != "" { + if err = untrustedCertificate.VerifyHostname(pkiTrustRoot.subjectHostname); err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected subject hostname: %v", err)) + } + } + + return untrustedCertificate.PublicKey, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go index 965901e1..6393b66e 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -71,6 +71,17 @@ func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedO } } +// PRSigstoreSignedWithPKI specifies a value for the "pki" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithPKI(p PRSigstoreSignedPKI) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.PKI != nil { + return InvalidPolicyFormatError(`"pki" already specified`) + } + pr.PKI = p + return nil + } +} + // PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
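verifyPKI above delegates all chain validation to crypto/x509. A reduced sketch of just that core, assuming the leaf certificate and both pools were already parsed; the package and function names are illustrative:

package pkisketch

import (
	"crypto/x509"
	"fmt"
)

// verifyLeaf builds a path from the leaf through the intermediate pool to a
// trusted root and enforces the code-signing EKU, as the patched code does.
func verifyLeaf(leaf *x509.Certificate, roots, intermediates *x509.CertPool) error {
	if _, err := leaf.Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
	}); err != nil {
		return fmt.Errorf("verifying leaf certificate failed: %w", err)
	}
	// Identity checks (email SAN, hostname) belong after chain validation,
	// mirroring the EmailAddresses / VerifyHostname checks above.
	return nil
}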
func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { @@ -159,8 +170,11 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, if res.Fulcio != nil { keySources++ } + if res.PKI != nil { + keySources++ + } if keySources != 1 { - return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified") + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas, fulcio, and pki must be specified") } rekorSources := 0 @@ -182,6 +196,9 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, if res.Fulcio != nil && rekorSources == 0 { return nil, InvalidPolicyFormatError("At least one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas must be specified if fulcio is used") } + if res.PKI != nil && rekorSources > 0 { + return nil, InvalidPolicyFormatError("rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas are not supported for pki") + } if res.SignedIdentity == nil { return nil, InvalidPolicyFormatError("signedIdentity not specified") @@ -218,9 +235,10 @@ var _ json.Unmarshaler = (*prSigstoreSigned)(nil) func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { *pr = prSigstoreSigned{} var tmp prSigstoreSigned - var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool + var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio, gotPKI bool var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool var fulcio prSigstoreSignedFulcio + var pki prSigstoreSignedPKI var signedIdentity json.RawMessage if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { @@ -253,6 +271,9 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { case "rekorPublicKeyDatas": gotRekorPublicKeyDatas = true return &tmp.RekorPublicKeyDatas + case "pki": + gotPKI = true + return &pki case "signedIdentity": return &signedIdentity default: @@ -303,6 +324,9 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { if gotRekorPublicKeyDatas { opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas)) } + if gotPKI { + opts = append(opts, PRSigstoreSignedWithPKI(&pki)) + } opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) res, err := newPRSigstoreSigned(opts...) 
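With the option setters added below, a caller could assemble the new requirement programmatically. A sketch under the assumption that the pre-existing NewPRSigstoreSigned, PRSigstoreSignedWithSignedIdentity, and NewPRMMatchRepoDigestOrExact APIs are unchanged; the path and email are placeholders:

package main

import "github.com/containers/image/v5/signature"

func main() {
	pki, err := signature.NewPRSigstoreSignedPKI(
		signature.PRSigstoreSignedPKIWithCARootsPath("/etc/pki/containers/roots.pem"),
		signature.PRSigstoreSignedPKIWithSubjectEmail("release@example.com"),
	)
	if err != nil {
		panic(err)
	}
	// Exactly one key source is allowed, so "pki" stands alone here.
	if _, err := signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithPKI(pki),
		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
	); err != nil {
		panic(err)
	}
}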
@@ -440,3 +464,167 @@ func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { *f = *res return nil } + +// PRSigstoreSignedPKIOption is a way to pass values to NewPRSigstoreSignedPKI +type PRSigstoreSignedPKIOption func(*prSigstoreSignedPKI) error + +// PRSigstoreSignedPKIWithCARootsPath specifies a value for the "caRootsPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsPath(caRootsPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsPath != "" { + return InvalidPolicyFormatError(`"caRootsPath" already specified`) + } + p.CARootsPath = caRootsPath + return nil + } +} + +// PRSigstoreSignedPKIWithCARootsData specifies a value for the "caRootsData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCARootsData(caRootsData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CARootsData != nil { + return InvalidPolicyFormatError(`"caRootsData" already specified`) + } + p.CARootsData = caRootsData + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesPath specifies a value for the "caIntermediatesPath" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesPath(caIntermediatesPath string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesPath != "" { + return InvalidPolicyFormatError(`"caIntermediatesPath" already specified`) + } + p.CAIntermediatesPath = caIntermediatesPath + return nil + } +} + +// PRSigstoreSignedPKIWithCAIntermediatesData specifies a value for the "caIntermediatesData" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithCAIntermediatesData(caIntermediatesData []byte) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.CAIntermediatesData != nil { + return InvalidPolicyFormatError(`"caIntermediatesData" already specified`) + } + p.CAIntermediatesData = caIntermediatesData + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectEmail(subjectEmail string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.SubjectEmail != "" { + return InvalidPolicyFormatError(`"subjectEmail" already specified`) + } + p.SubjectEmail = subjectEmail + return nil + } +} + +// PRSigstoreSignedPKIWithSubjectHostname specifies a value for the "subjectHostname" field when calling NewPRSigstoreSignedPKI +func PRSigstoreSignedPKIWithSubjectHostname(subjectHostname string) PRSigstoreSignedPKIOption { + return func(p *prSigstoreSignedPKI) error { + if p.SubjectHostname != "" { + return InvalidPolicyFormatError(`"subjectHostname" already specified`) + } + p.SubjectHostname = subjectHostname + return nil + } +} + +// newPRSigstoreSignedPKI is NewPRSigstoreSignedPKI, except it returns the private type +func newPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (*prSigstoreSignedPKI, error) { + res := prSigstoreSignedPKI{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CARootsPath != "" && res.CARootsData != nil { + return nil, InvalidPolicyFormatError("caRootsPath and caRootsData cannot be used simultaneously") + } + if res.CARootsPath == "" && res.CARootsData == nil { + return nil, InvalidPolicyFormatError("At least one of caRootsPath and caRootsData must be specified") + } + + if res.CAIntermediatesPath != "" && 
res.CAIntermediatesData != nil { + return nil, InvalidPolicyFormatError("caIntermediatesPath and caIntermediatesData cannot be used simultaneously") + } + + if res.SubjectEmail == "" && res.SubjectHostname == "" { + return nil, InvalidPolicyFormatError("At least one of subjectEmail, subjectHostname must be specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedPKI returns a PRSigstoreSignedPKI based on options. +func NewPRSigstoreSignedPKI(options ...PRSigstoreSignedPKIOption) (PRSigstoreSignedPKI, error) { + return newPRSigstoreSignedPKI(options...) +} + +// Compile-time check that prSigstoreSignedPKI implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedPKI)(nil) + +func (p *prSigstoreSignedPKI) UnmarshalJSON(data []byte) error { + *p = prSigstoreSignedPKI{} + var tmp prSigstoreSignedPKI + var gotCARootsPath, gotCARootsData, gotCAIntermediatesPath, gotCAIntermediatesData, gotSubjectEmail, gotSubjectHostname bool + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "caRootsPath": + gotCARootsPath = true + return &tmp.CARootsPath + case "caRootsData": + gotCARootsData = true + return &tmp.CARootsData + case "caIntermediatesPath": + gotCAIntermediatesPath = true + return &tmp.CAIntermediatesPath + case "caIntermediatesData": + gotCAIntermediatesData = true + return &tmp.CAIntermediatesData + case "subjectEmail": + gotSubjectEmail = true + return &tmp.SubjectEmail + case "subjectHostname": + gotSubjectHostname = true + return &tmp.SubjectHostname + default: + return nil + } + }); err != nil { + return err + } + + var opts []PRSigstoreSignedPKIOption + if gotCARootsPath { + opts = append(opts, PRSigstoreSignedPKIWithCARootsPath(tmp.CARootsPath)) + } + if gotCARootsData { + opts = append(opts, PRSigstoreSignedPKIWithCARootsData(tmp.CARootsData)) + } + if gotCAIntermediatesPath { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesPath(tmp.CAIntermediatesPath)) + } + if gotCAIntermediatesData { + opts = append(opts, PRSigstoreSignedPKIWithCAIntermediatesData(tmp.CAIntermediatesData)) + } + if gotSubjectEmail { + opts = append(opts, PRSigstoreSignedPKIWithSubjectEmail(tmp.SubjectEmail)) + } + if gotSubjectHostname { + opts = append(opts, PRSigstoreSignedPKIWithSubjectHostname(tmp.SubjectHostname)) + } + + res, err := newPRSigstoreSignedPKI(opts...) + if err != nil { + return err + } + + *p = *res + return nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go index 9c553771..faede787 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -97,11 +97,64 @@ func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { return &fulcio, nil } +// prepareTrustRoot creates a pkiTrustRoot from the input data. +// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.) 
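Given the UnmarshalJSON above and the JSON tags declared in policy_types.go further down, the same requirement can also come from policy.json. A sketch that round-trips such a fragment through the pre-existing public parser (NewPolicyFromBytes, assumed unchanged); the registry, path, and email are placeholders:

package main

import "github.com/containers/image/v5/signature"

func main() {
	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com": [{
	                "type": "sigstoreSigned",
	                "pki": {
	                    "caRootsPath": "/etc/pki/containers/roots.pem",
	                    "subjectEmail": "release@example.com"
	                },
	                "signedIdentity": {"type": "matchRepoDigestOrExact"}
	            }]
	        }
	    }
	}`)
	if _, err := signature.NewPolicyFromBytes(policyJSON); err != nil {
		panic(err)
	}
}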
+func (p *prSigstoreSignedPKI) prepareTrustRoot() (*pkiTrustRoot, error) { + caRootsCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caRootsPath" and "caRootsData" specified`, + path: p.CARootsPath, + data: p.CARootsData, + }) + if err != nil { + return nil, err + } + if len(caRootsCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: PKI specified with not exactly one of "caRootsPath" or "caRootsData"`) + } + rootsCerts := x509.NewCertPool() + if ok := rootsCerts.AppendCertsFromPEM(caRootsCertPEMs[0]); !ok { + return nil, errors.New("error loading PKI CA Roots certificates") + } + pki := pkiTrustRoot{ + caRootsCertificates: rootsCerts, + subjectEmail: p.SubjectEmail, + subjectHostname: p.SubjectHostname, + } + caIntermediateCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caIntermediatesPath" and "caIntermediatesData" specified`, + path: p.CAIntermediatesPath, + data: p.CAIntermediatesData, + }) + if err != nil { + return nil, err + } + if caIntermediateCertPEMs != nil { + if len(caIntermediateCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: PKI specified with invalid value from "caIntermediatesPath" or "caIntermediatesData"`) + } + intermediatePool := x509.NewCertPool() + trustedIntermediates, err := cryptoutils.UnmarshalCertificatesFromPEM(caIntermediateCertPEMs[0]) + if err != nil { + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading trusted intermediate certificates: %v", err)) + } + for _, trustedIntermediateCert := range trustedIntermediates { + intermediatePool.AddCert(trustedIntermediateCert) + } + pki.caIntermediateCertificates = intermediatePool + } + + if err := pki.validate(); err != nil { + return nil, err + } + return &pki, nil +} + // sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy type sigstoreSignedTrustRoot struct { publicKeys []crypto.PublicKey fulcio *fulcioTrustRoot rekorPublicKeys []*ecdsa.PublicKey + pki *pkiTrustRoot } func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) { @@ -166,6 +219,14 @@ func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) } } + if pr.PKI != nil { + p, err := pr.PKI.prepareTrustRoot() + if err != nil { + return nil, err + } + res.pki = p + } + return &res, nil } @@ -189,13 +250,23 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva } untrustedPayload := sig.UntrustedPayload() + keySources := 0 + if trustRoot.publicKeys != nil { + keySources++ + } + if trustRoot.fulcio != nil { + keySources++ + } + if trustRoot.pki != nil { + keySources++ + } + var publicKeys []crypto.PublicKey switch { - case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations. - return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified") - case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations. - return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified") - + case keySources > 1: // newPRSigstoreSigned rejects more than one key source. + return sarRejected, errors.New("Internal inconsistency: More than one of public key, Fulcio, or PKI specified") + case keySources == 0: // newPRSigstoreSigned rejects empty key sources.
+ return sarRejected, errors.New("Internal inconsistency: A public key, Fulcio, or PKI must be specified.") case trustRoot.publicKeys != nil: if trustRoot.rekorPublicKeys != nil { untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] @@ -254,6 +325,24 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva return sarRejected, err } publicKeys = []crypto.PublicKey{pk} + + case trustRoot.pki != nil: + if trustRoot.rekorPublicKeys != nil { // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: PKI specified with a Rekor public key") + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyPKI(trustRoot.pki, []byte(untrustedCert), untrustedIntermediateChainBytes) + if err != nil { + return sarRejected, err + } + publicKeys = []crypto.PublicKey{pk} } if len(publicKeys) == 0 { diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go index 290fc245..038351cb 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package signature diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go index 702b7171..6a45a78f 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package signature diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go index 32aa1c0a..2d107ed4 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_types.go +++ b/vendor/github.com/containers/image/v5/signature/policy_types.go @@ -111,16 +111,16 @@ type prSignedBaseLayer struct { type prSigstoreSigned struct { prCommon - // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. KeyPath string `json:"keyPath,omitempty"` - // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. KeyPaths []string `json:"keyPaths,omitempty"` - // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + // KeyData contains the trusted key, base64-encoded. 
Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. KeyData []byte `json:"keyData,omitempty"` - // KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + // KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. KeyDatas [][]byte `json:"keyDatas,omitempty"` - // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. // If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well. Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"` @@ -141,6 +141,9 @@ type prSigstoreSigned struct { // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"` + // PKI specifies which PKI-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas, Fulcio, and PKI must be specified. + PKI PRSigstoreSignedPKI `json:"pki,omitempty"` + // SignedIdentity specifies what image identity the signature must be claiming about the image. // Defaults to "matchRepoDigestOrExact" if not specified. // Note that /usr/bin/cosign interoperability might require using repo-only matching. @@ -167,6 +170,30 @@ type prSigstoreSignedFulcio struct { SubjectEmail string `json:"subjectEmail,omitempty"` } +// PRSigstoreSignedPKI contains PKI configuration options for a "sigstoreSigned" PolicyRequirement. +type PRSigstoreSignedPKI interface { + // prepareTrustRoot creates a pkiTrustRoot from the input data. + // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedPKI is the only one.) + prepareTrustRoot() (*pkiTrustRoot, error) +} + +// prSigstoreSignedPKI contains non-fulcio certificate PKI configuration options for prSigstoreSigned +type prSigstoreSignedPKI struct { + // CARootsPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CARootsPath and CARootsData must be specified. + CARootsPath string `json:"caRootsPath,omitempty"` + // CARootsData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CARootsPath and CARootsData must be specified. + CARootsData []byte `json:"caRootsData,omitempty"` + // CAIntermediatesPath is a path to a file containing accepted CA intermediate certificates, in PEM format. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both. + CAIntermediatesPath string `json:"caIntermediatesPath,omitempty"` + // CAIntermediatesData contains accepted CA intermediate certificates in PEM format, all of that base64-encoded. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both. + CAIntermediatesData []byte `json:"caIntermediatesData,omitempty"` + + // SubjectEmail specifies the expected email address imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified. + SubjectEmail string `json:"subjectEmail,omitempty"` + // SubjectHostname specifies the expected hostname imposed on the subject to which the certificate was issued.
At least one of SubjectEmail and SubjectHostname must be specified. + SubjectHostname string `json:"subjectHostname,omitempty"` +} + // PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. // The type is public, but its implementation is private. diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 51fac71e..6d9b8cbf 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -1,5 +1,4 @@ //go:build !containers_image_storage_stub -// +build !containers_image_storage_stub package storage @@ -17,11 +16,13 @@ import ( "sync/atomic" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" + srcImpl "github.com/containers/image/v5/internal/imagesource/impl" + srcStubs "github.com/containers/image/v5/internal/imagesource/stubs" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/manifest" @@ -31,6 +32,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked" + "github.com/containers/storage/pkg/chunked/toc" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -57,8 +59,9 @@ type storageImageDestination struct { imageRef storageReference directory string // Temporary directory where we store blobs until Commit() time nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - manifestDigest digest.Digest // Valid if len(manifest) != 0 + manifest []byte // (Per-instance) manifest contents, or nil if not yet known. + manifestMIMEType string // Valid if manifest != nil + manifestDigest digest.Digest // Valid if manifest != nil untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet signatures []byte // Signature contents, temporary signatureses map[digest.Digest][]byte // Instance signature contents, temporary @@ -108,8 +111,10 @@ type storageImageDestinationLockProtected struct { // // Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller // to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases. - indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID - indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest + indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID + // Mapping from layer index to a TOC Digest. + // If this is set, then either c/storage/pkg/chunked/toc.GetTOCDigest must have returned a value, or indexToDiffID must be set as well. + indexToTOCDigest map[int]digest.Digest blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above. 
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames) @@ -121,6 +126,9 @@ type storageImageDestinationLockProtected struct { filenames map[digest.Digest]string // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set. fileSizes map[digest.Digest]int64 + + // Config + configDigest digest.Digest // "" if N/A or not known yet. } // addedLayerInfo records data about a layer to use in this image. @@ -201,6 +209,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1))) } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + if configErr != nil { + return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr) + } + s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig) + return nil +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -214,7 +234,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream return info, err } - if options.IsConfig || options.LayerIndex == nil { + if options.IsConfig { + s.lock.Lock() + defer s.lock.Unlock() + if s.lockProtected.configDigest != "" { + return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q", + s.lockProtected.configDigest.String(), info.Digest.String()) + } + s.lockProtected.configDigest = info.Digest + return info, nil + } + if options.LayerIndex == nil { return info, nil } @@ -241,43 +271,56 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf if err != nil { return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err) } - defer file.Close() - counter := ioutils.NewWriteCounter(file) - stream = io.TeeReader(stream, counter) - digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) - decompressed, err := archive.DecompressStream(stream) - if err != nil { - return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err) - } + blobDigest, diffID, count, err := func() (_, _ digest.Digest, _ int64, retErr error) { // A scope for defer + // since we are writing to this file, make sure we handle err on Close() + defer func() { + closeErr := file.Close() + if retErr == nil { + retErr = closeErr + } + }() + counter := ioutils.NewWriteCounter(file) + stream = io.TeeReader(stream, counter) + digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) + decompressed, err := archive.DecompressStream(stream) + if err != nil { + return "", "", 0, fmt.Errorf("setting up to decompress blob: %w", err) + + } + defer decompressed.Close() + + diffID := digest.Canonical.Digester() + // Copy the data to the file. 
+ // TODO: This can take quite some time, and should ideally be cancellable using context.Context. + _, err = io.Copy(diffID.Hash(), decompressed) + if err != nil { + return "", "", 0, fmt.Errorf("storing blob to file %q: %w", filename, err) + } - diffID := digest.Canonical.Digester() - // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using context.Context. - _, err = io.Copy(diffID.Hash(), decompressed) - decompressed.Close() + return digester.Digest(), diffID.Digest(), counter.Count, nil + }() if err != nil { - return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err) + return private.UploadedBlob{}, err } // Determine blob properties, and fail if information that we were given about the blob // is known to be incorrect. - blobDigest := digester.Digest() blobSize := blobinfo.Size if blobSize < 0 { - blobSize = counter.Count - } else if blobinfo.Size != counter.Count { + blobSize = count + } else if blobinfo.Size != count { return private.UploadedBlob{}, ErrBlobSizeMismatch } // Record information about the blob. s.lock.Lock() - s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest() - s.lockProtected.fileSizes[blobDigest] = counter.Count + s.lockProtected.blobDiffIDs[blobDigest] = diffID + s.lockProtected.fileSizes[blobDigest] = count s.lockProtected.filenames[blobDigest] = filename s.lock.Unlock() // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). - options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + options.Cache.RecordDigestUncompressedPair(blobDigest, diffID) return private.UploadedBlob{ Digest: blobDigest, Size: blobSize, @@ -315,6 +358,56 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read // If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. // The fallback _must not_ be done otherwise. func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) { + inputTOCDigest, err := toc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return private.UploadedBlob{}, err + } + + // The identity of partially-pulled layers is, as long as we keep compatibility with tar-like consumers, + // unfixably ambiguous: there are two possible “views” of the same file (same compressed digest), + // the traditional “view” that decompresses the primary stream and consumes a tar file, + // and the partial-pull “view” that starts with the TOC. + // The two “views” have two separate metadata sets and may refer to different parts of the blob for file contents; + // the direct way to ensure they are consistent would be to read the full primary stream (and authenticate it against + // the compressed digest), and ensure the metadata and layer contents exactly match the partially-pulled contents - + // making the partial pull completely pointless. 
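The putBlobToPendingFile rewrite above wraps the file write in a closure with a named return, so an error from Close() on a file being written is no longer dropped. A minimal standalone sketch of that idiom; the file path and helper name are illustrative, not c/image API:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeAll mirrors the "scope for defer" pattern: the deferred Close runs
// inside the closure, and its error is folded into the named return value.
func writeAll(path string, data []byte) (retErr error) {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	// Since we are writing to this file, a Close error means lost data.
	defer func() {
		closeErr := file.Close()
		if retErr == nil {
			retErr = closeErr
		}
	}()
	_, err = file.Write(data)
	return err
}

func main() {
	fmt.Println(writeAll(filepath.Join(os.TempDir(), "sketch.blob"), []byte("hello")))
}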
+ // + // Instead, for partial-pull-capable layers (with inputTOCDigest set), we require the image to “commit” + // to uncompressed layer digest values via the config's RootFS.DiffIDs array: + // they are already naturally computed for traditionally-pulled layers, and for partially-pulled layers we + // do the optimal partial pull, and then reconstruct the uncompressed tar stream just to (expensively) compute this digest. + // + // Layers which don’t support partial pulls (inputTOCDigest == "", incl. all schema1 layers) can be let through: + // the partial pull code will either not engage, or consume the full layer; and the rules of indexToTOCDigest / layerIdentifiedByTOC + // ensure the layer is identified by DiffID, i.e. using the traditional “view”. + // + // But if inputTOCDigest is set and the input image doesn't have RootFS.DiffIDs (the config is invalid for schema2/OCI), + // don't allow a partial pull, and fall back to PutBlobWithOptions. + // + // (The user can opt out of the DiffID commitment checking by a c/storage option, giving up security for performance, + // but we will still trigger the fall back here, and we will still enforce a DiffID match, so that the set of accepted images + // is the same in both cases, and so that users are not tempted to set the c/storage option to allow accepting some invalid images.) + var untrustedDiffID digest.Digest // "" if unknown + udid, err := s.untrustedLayerDiffID(options.LayerIndex) + if err != nil { + var diffIDUnknownErr untrustedLayerDiffIDUnknownError + switch { + case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable): + // PutBlobPartial is a private API, so all callers are within c/image, and should have called + // NoteOriginalOCIConfig first. + return private.UploadedBlob{}, fmt.Errorf("internal error: in PutBlobPartial, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable") + case errors.As(err, &diffIDUnknownErr): + if inputTOCDigest != nil { + return private.UploadedBlob{}, private.NewErrFallbackToOrdinaryLayerDownload(err) + } + untrustedDiffID = "" // A schema1 image or a non-TOC layer with no ambiguity, let it through + default: + return private.UploadedBlob{}, err + } + } else { + untrustedDiffID = udid + } + fetcher := zstdFetcher{ chunkAccessor: chunkAccessor, ctx: ctx, @@ -328,10 +421,11 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces } }() - differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher) + differ, err := chunked.NewDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher) if err != nil { return private.UploadedBlob{}, err } + defer differ.Close() out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ) if err != nil { @@ -351,41 +445,64 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces blobDigest := srcInfo.Digest s.lock.Lock() - if out.UncompressedDigest != "" { - s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest - if out.TOCDigest != "" { - options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) - } - // Don’t set indexToTOCDigest on this path: - // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID. - // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. 
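The comment block above boils down to a three-way policy: layers without a TOC annotation pass through unchanged, TOC-annotated layers with a config-committed DiffID may be partially pulled, and TOC-annotated layers without one must fall back to an ordinary download. A hedged sketch of just that decision table; the function and parameter names are invented for illustration and are not the c/image control flow:

package main

import (
	"errors"
	"fmt"
)

// decidePartialPull condenses the rules documented above: hasTOC corresponds to
// inputTOCDigest != nil, diffIDKnown to a usable config RootFS.DiffIDs entry.
func decidePartialPull(hasTOC, diffIDKnown bool) (bool, error) {
	switch {
	case !hasTOC:
		return false, nil // traditional pull; no "view" ambiguity to resolve
	case diffIDKnown:
		return true, nil // partial pull allowed; the DiffID is enforced afterwards
	default:
		return false, errors.New("fall back to ordinary layer download")
	}
}

func main() {
	for _, c := range []struct{ toc, diffID bool }{{false, false}, {true, true}, {true, false}} {
		ok, err := decidePartialPull(c.toc, c.diffID)
		fmt.Printf("hasTOC=%v diffIDKnown=%v -> partial=%v err=%v\n", c.toc, c.diffID, ok, err)
	}
}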
- // That TOC is quite unlikely to match any other TOC value. - - // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is - responsible for ensuring blobDigest has been validated. - if out.CompressedDigest != blobDigest { - return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", - out.CompressedDigest, blobDigest) - } - // So, record also information about blobDigest, that might benefit reuse. - // We trust PrepareStagedLayer to validate or create both values correctly. - s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest - options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) - } else { - // Use diffID for layer identity if it is known. - if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { - s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + if err := func() error { // A scope for defer + defer s.lock.Unlock() + + // For true partial pulls, c/storage decides whether to compute the uncompressed digest based on an option in storage.conf + // (defaults to true, to avoid ambiguity). + // c/storage can also be configured to consume a layer not prepared for partial pulls (primarily to allow composefs conversion), + // and in that case it always consumes the full blob and always computes the uncompressed digest. + if out.UncompressedDigest != "" { + // This is centrally enforced later, in commitLayer, but because we have the value available, + // we might just as well check immediately. + if untrustedDiffID != "" && out.UncompressedDigest != untrustedDiffID { + return fmt.Errorf("uncompressed digest of layer %q is %q, config claims %q", srcInfo.Digest.String(), + out.UncompressedDigest.String(), untrustedDiffID.String()) + } + + s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest + if out.TOCDigest != "" { + s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest + options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) + } + + // If the whole layer has been consumed, chunked.NewDiffer is responsible for ensuring blobDigest has been validated. + if out.CompressedDigest != "" { + if out.CompressedDigest != blobDigest { + return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", + out.CompressedDigest, blobDigest) + } + // So, record also information about blobDigest, that might benefit reuse. + // We trust PrepareStagedLayer to validate or create both values correctly. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) + } + } else { + // Sanity-check the defined rules for indexToTOCDigest. + if inputTOCDigest == nil { + return fmt.Errorf("internal error: PrepareStagedLayer returned a TOC-only identity for layer %q with no TOC digest", srcInfo.Digest.String()) + } + + // Use diffID for layer identity if it is known.
+ if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + } + s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest } - s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest + s.lockProtected.diffOutputs[options.LayerIndex] = out + return nil + }(); err != nil { + return private.UploadedBlob{}, err } - s.lockProtected.diffOutputs[options.LayerIndex] = out - s.lock.Unlock() succeeded = true return private.UploadedBlob{ - Digest: blobDigest, - Size: srcInfo.Size, - }, nil + Digest: blobDigest, + Size: srcInfo.Size, + }, s.queueOrCommit(options.LayerIndex, addedLayerInfo{ + digest: blobDigest, + emptyLayer: options.EmptyLayer, + }) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -417,22 +534,43 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige if err := blobDigest.Validate(); err != nil { return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) } - if options.TOCDigest != "" { + useTOCDigest := false // If set, (options.TOCDigest != "" && options.LayerIndex != nil) AND we can use options.TOCDigest safely. + if options.TOCDigest != "" && options.LayerIndex != nil { if err := options.TOCDigest.Validate(); err != nil { return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) } + // Only consider using TOCDigest if we can avoid ambiguous image “views”, see the detailed comment in PutBlobPartial. + _, err := s.untrustedLayerDiffID(*options.LayerIndex) + if err != nil { + var diffIDUnknownErr untrustedLayerDiffIDUnknownError + switch { + case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable): + // options.TOCDigest is a private API, so all callers are within c/image, and should have called + // NoteOriginalOCIConfig first. + return false, private.ReusedBlob{}, fmt.Errorf("internal error: in TryReusingBlobWithOptions, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable") + case errors.As(err, &diffIDUnknownErr): + logrus.Debugf("Not using TOC %q to look for layer reuse: %v", options.TOCDigest, err) + // But don’t abort entirely, keep useTOCDigest = false, try a blobDigest match. + default: + return false, private.ReusedBlob{}, err + } + } else { + useTOCDigest = true + } } // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() - if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil { + if options.SrcRef != nil && useTOCDigest { // Check if we have the layer in the underlying additional layer store. aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String()) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err) } else if err == nil { + // Compare the long comment in PutBlobPartial. We assume that the Additional Layer Store will, somehow, + // avoid layer “view” ambiguity. 
alsTOCDigest := aLayer.TOCDigest() if alsTOCDigest != options.TOCDigest { // FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could @@ -505,13 +643,13 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) } if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { - s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest + s.lockProtected.blobDiffIDs[reused.Digest] = uncompressedDigest return true, reused, nil } } } - if options.TOCDigest != "" && options.LayerIndex != nil { + if useTOCDigest { // Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that. // Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse. uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest) @@ -532,6 +670,11 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err) } if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { + if uncompressedDigest == "" && layers[0].UncompressedDigest != "" { + // Determine an uncompressed digest if at all possible, to use a traditional image ID + // and to maximize image reuse. + uncompressedDigest = layers[0].UncompressedDigest + } if uncompressedDigest != "" { s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest } @@ -568,13 +711,22 @@ func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, // trustedLayerIdentityData is a _consistent_ set of information known about a single layer. type trustedLayerIdentityData struct { - layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID + // true if we decided the layer should be identified by tocDigest, false if by diffID + // This can only be true if c/storage/pkg/chunked/toc.GetTOCDigest returns a value. + layerIdentifiedByTOC bool diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC tocDigest digest.Digest // A digest of the layer’s TOC, or "" if unknown; must be set if layerIdentifiedByTOC blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted. } +// logString() prints a representation of trusted suitable for identifying a layer in logs and errors. +// The string is already quoted to expose malicious input and does not need to be quoted again. +// Note that it does not include _all_ of the contents. +func (trusted trustedLayerIdentityData) logString() string { + return fmt.Sprintf("%q/%q/%q", trusted.blobDigest, trusted.tocDigest, trusted.diffID) +} + // trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest). // blobDigest is the (possibly-compressed) layer digest referenced in the manifest. // It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
@@ -785,23 +937,6 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) return nil } -// singleLayerIDComponent returns a single layer’s the input to computing a layer (chain) ID, -// and an indication whether the input already has the shape of a layer ID. -// It returns ("", false) if the layer is not found at all (which should never happen) -func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) { - s.lock.Lock() - defer s.lock.Unlock() - - trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest) - if !ok { - return "", false - } - if trusted.layerIdentifiedByTOC { - return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. - } - return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value. -} - // commitLayer commits the specified layer with the given index to the storage. // size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs. // @@ -813,16 +948,15 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig // must guarantee that, at any given time, at most one goroutine may execute // `commitLayer()`. func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) { - // Already committed? Return early. if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { return false, nil } - // Start with an empty string or the previous layer ID. Note that - // `s.indexToStorageID` can only be accessed by *one* goroutine at any - // given time. Hence, we don't need to lock accesses. - var parentLayer string + var parentLayer string // "" if no parent if index != 0 { + // s.indexToStorageID can only be written by this function, and our caller + // is responsible for ensuring it can only be called by *one* goroutine at any + // given time. Hence, we don't need to lock accesses. prev, ok := s.indexToStorageID[index-1] if !ok { return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1) @@ -830,18 +964,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si parentLayer = prev } - // Carry over the previous ID for empty non-base layers. if info.emptyLayer { s.indexToStorageID[index] = parentLayer return false, nil } - // Check if there's already a layer with the ID that we'd give to the result of applying - // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - // The layerID refers either to the DiffID or the digest of the TOC. - layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest) - if layerIDComponent == "" { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / … + // Collect trusted parameters of the layer.
+ s.lock.Lock() + trusted, ok := s.trustedLayerIdentityDataLocked(index, info.digest) + s.lock.Unlock() + if !ok { + // Check if the layer exists already and the caller just (incorrectly) forgot to pass it to us in a PutBlob() / TryReusingBlob() / … // // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller // that relies on using a blob digest that has never been seen by the store had better call @@ -865,23 +998,54 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) } - layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest) - if layerIDComponent == "" { + s.lock.Lock() + trusted, ok = s.trustedLayerIdentityDataLocked(index, info.digest) + s.lock.Unlock() + if !ok { return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String()) } } - id := layerIDComponent - if !layerIDComponentStandalone || parentLayer != "" { - id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded() + // Ensure that we always see the same “view” of a layer, as identified by the layer’s uncompressed digest, + // unless the user has explicitly opted out of this in storage.conf: see the more detailed explanation in PutBlobPartial. + if trusted.diffID != "" { + untrustedDiffID, err := s.untrustedLayerDiffID(index) + if err != nil { + var diffIDUnknownErr untrustedLayerDiffIDUnknownError + switch { + case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable): + logrus.Debugf("Skipping commit for layer %q, manifest not yet available for DiffID check", index) + return true, nil + case errors.As(err, &diffIDUnknownErr): + // If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations, + // so we could not have reused a TOC-identified layer nor have done a TOC-identified partial pull, + // i.e. there is no other “view” to worry about. Sanity-check that we really see the only expected view. + // + // Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case + // we _must_ fail if we used a TOC-identified layer - but PutBlobPartial should have already + // refused to do a partial pull, so we are in an inconsistent state. + if trusted.layerIdentifiedByTOC { + return false, fmt.Errorf("internal error: layer %d for blob %s was identified by TOC, but we don't have a DiffID in config", + index, trusted.logString()) + } + // else a schema1 image or a non-TOC layer with no ambiguity, let it through + default: + return false, err + } + } else if trusted.diffID != untrustedDiffID { + return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID) + } } + + id := layerID(parentLayer, trusted) + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { // There's already a layer that should have the right contents, just reuse it. s.indexToStorageID[index] = layer.ID return false, nil } - layer, err := s.createNewLayer(index, info.digest, parentLayer, id) + layer, err := s.createNewLayer(index, trusted, parentLayer, id) if err != nil { return false, err } @@ -892,32 +1056,62 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si return false, nil } -// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be ""). 
+// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted) +func layerID(parentID string, trusted trustedLayerIdentityData) string { + var component string + mustHash := false + if trusted.layerIdentifiedByTOC { + // "@" is not a valid start of a digest.Digest.Encoded(), so this is unambiguous with the !layerIdentifiedByTOC case. + // But we _must_ hash this below to get a Digest.Encoded()-formatted value. + component = "@TOC=" + trusted.tocDigest.Encoded() + mustHash = true + } else { + component = trusted.diffID.Encoded() // This looks like chain IDs, and it uses the traditional value. + } + + if parentID == "" && !mustHash { + return component + } + return digest.Canonical.FromString(parentID + "+" + component).Encoded() +} + +// createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be ""). // If the layer cannot be committed yet, the function returns (nil, nil). -func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) { +func (s *storageImageDestination) createNewLayer(index int, trusted trustedLayerIdentityData, parentLayer, newLayerID string) (*storage.Layer, error) { s.lock.Lock() diffOutput, ok := s.lockProtected.diffOutputs[index] s.lock.Unlock() if ok { - // If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput. + // Typically, we compute a trusted DiffID value to authenticate the layer contents, see the detailed explanation + // in PutBlobPartial. If the user has opted out of that, but we know a trusted DiffID value + // (e.g. from a BlobInfoCache), set it in diffOutput. // That way it will be persisted in storage even if the cache is deleted; also - // we can use the value below to avoid the untrustedUncompressedDigest logic (and notably - // the costly commit delay until a manifest is available). - s.lock.Lock() - if d, ok := s.lockProtected.indexToDiffID[index]; ok { - diffOutput.UncompressedDigest = d + // we can use the value below to avoid the untrustedUncompressedDigest logic. + if diffOutput.UncompressedDigest == "" && trusted.diffID != "" { + diffOutput.UncompressedDigest = trusted.diffID } - s.lock.Unlock() var untrustedUncompressedDigest digest.Digest if diffOutput.UncompressedDigest == "" { d, err := s.untrustedLayerDiffID(index) if err != nil { - return nil, err - } - if d == "" { - logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID) - return nil, nil + var diffIDUnknownErr untrustedLayerDiffIDUnknownError + switch { + case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable): + logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID) + return nil, nil + case errors.As(err, &diffIDUnknownErr): + // If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations, + // so we should have !trusted.layerIdentifiedByTOC, i.e. we should have set + // diffOutput.UncompressedDigest above in this function, at the very latest. + // + // Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case + // commitLayer should have already refused this image when dealing with the “view” ambiguity. 
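layerID above keeps the traditional behavior, where a bare DiffID doubles as its own chain ID, but forces a hash whenever the "@TOC=" marker is present, so the result is always in Digest.Encoded() form. A standalone sketch using the real go-digest module; the inputs are placeholders:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// chainID reproduces the scheme in spirit: hash parentID+component, except for
// the first DiffID-identified layer, whose DiffID is used verbatim.
func chainID(parentID, component string, mustHash bool) string {
	if parentID == "" && !mustHash {
		return component
	}
	return digest.Canonical.FromString(parentID + "+" + component).Encoded()
}

func main() {
	diffID1 := digest.Canonical.FromString("layer 1").Encoded() // placeholder DiffID
	diffID2 := digest.Canonical.FromString("layer 2").Encoded() // placeholder DiffID

	base := chainID("", diffID1, false)           // == diffID1, the traditional value
	child := chainID(base, diffID2, false)        // hash of "parent+component"
	tocBase := chainID("", "@TOC="+diffID1, true) // TOC identities are always hashed

	fmt.Println(base, child, tocBase)
}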
+ return nil, fmt.Errorf("internal error: layer %d for blob %s was partially-pulled with unknown UncompressedDigest, but we don't have a DiffID in config", + index, trusted.logString()) + default: + return nil, err + } } untrustedUncompressedDigest = d @@ -965,19 +1159,17 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // then we need to read the desired contents from a layer. var filename string var gotFilename bool - s.lock.Lock() - trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest) - if ok && trusted.blobDigest != "" { + if trusted.blobDigest != "" { + s.lock.Lock() filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest] - } - s.lock.Unlock() - if !ok { // We have already determined newLayerID, so the data must have been available. - return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest) + s.lock.Unlock() } var trustedOriginalDigest digest.Digest // For storage.LayerOptions + var trustedOriginalSize *int64 if gotFilename { // The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest. trustedOriginalDigest = trusted.blobDigest + trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost. } else { // Try to find the layer with contents matching the data we use. var layer *storage.Layer // = nil @@ -997,7 +1189,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D } } if layer == nil { - return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID) + return nil, fmt.Errorf("layer for blob %s not found", trusted.logString()) } // Read the layer's contents. @@ -1007,7 +1199,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D } diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions) if err2 != nil { - return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2) + return nil, fmt.Errorf("reading layer %q for blob %s: %w", layer.ID, trusted.logString(), err2) } // Copy the layer diff to a file. Diff() takes a lock that it holds // until the ReadCloser that it returns is closed, and PutLayer() wants @@ -1032,22 +1224,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D if trusted.diffID == "" && layer.UncompressedDigest != "" { trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now. } - // The stream we have is uncompressed, and it matches trusted.diffID (if known). - // - // FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse. - // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created - // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream). + + // Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse. + // But we don’t want to just use the size from the manifest if we never saw the compressed blob, + // so that we don’t propagate mistakes / attacks. 
// - // We can legitimately set storage.LayerOptions.OriginalDigest to "", - // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. - // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. - trustedOriginalDigest = trusted.diffID + // s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename. + // So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit + // to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to + // this new layer copy can help.) + if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 { + trustedOriginalDigest = trusted.blobDigest + sizeCopy := layer.CompressedSize + trustedOriginalSize = &sizeCopy + } else { + // The stream we have is uncompressed, and it matches trusted.diffID (if known). + // + // We can legitimately set storage.LayerOptions.OriginalDigest to "", + // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. + // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. + trustedOriginalDigest = trusted.diffID + trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost. + } // Allow using the already-collected layer contents without extracting the layer again. // // This only matches against the uncompressed digest. - // We don’t have the original compressed data here to trivially set filenames[layerDigest]. - // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API. + // If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the + // same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config), + // we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate + // between “original files” and “possibly uncompressed files”. // Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity. if trusted.diffID != "" { s.lock.Lock() @@ -1067,55 +1273,128 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{ OriginalDigest: trustedOriginalDigest, + OriginalSize: trustedOriginalSize, // nil in many cases // This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream. UncompressedDigest: trusted.diffID, }, file) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err) + return nil, fmt.Errorf("adding layer with blob %s: %w", trusted.logString(), err) } return layer, nil } +// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed, +// to allow using image.FromUnparsedImage. 
+type uncommittedImageSource struct { + srcImpl.Compat + srcImpl.PropertyMethodsInitialize + srcImpl.NoSignatures + srcImpl.DoesNotAffectLayerInfosForCopy + srcStubs.NoGetBlobAtInitialize + + d *storageImageDestination +} + +func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource { + s := &uncommittedImageSource{ + PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()), + + d: d, + } + s.Compat = srcImpl.AddCompat(s) + return s +} + +func (u *uncommittedImageSource) Reference() types.ImageReference { + return u.d.Reference() +} + +func (u *uncommittedImageSource) Close() error { + return nil +} + +func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return u.d.manifest, u.d.manifestMIMEType, nil +} + +func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + blob, err := u.d.getConfigBlob(info) + if err != nil { + return nil, -1, err + } + return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil +} + +// errUntrustedLayerDiffIDNotYetAvailable is returned by untrustedLayerDiffID +// if the value is not yet available (but it can be available after s.manifest is set). +// This should only happen for external callers of the transport, not for c/image/copy. +// +// Callers of untrustedLayerDiffID before PutManifest must handle this error specially; +// callers after PutManifest can use the default, reporting an internal error. +var errUntrustedLayerDiffIDNotYetAvailable = errors.New("internal error: untrustedLayerDiffID has no value available and fallback was not implemented") + +// untrustedLayerDiffIDUnknownError is returned by untrustedLayerDiffID +// if the image’s format does not provide DiffIDs. +type untrustedLayerDiffIDUnknownError struct { + layerIndex int +} + +func (e untrustedLayerDiffIDUnknownError) Error() string { + return fmt.Sprintf("DiffID value for layer %d is unknown or explicitly empty", e.layerIndex) +} + // untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config. -// If the value is not yet available (but it can be available after s.manifets is set), it returns ("", nil). -// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication. +// It may return two special errors, errUntrustedLayerDiffIDNotYetAvailable or untrustedLayerDiffIDUnknownError. +// +// WARNING: This function does not even validate that the returned digest has a valid format. +// WARNING: We don’t _always_ validate this DiffID value against the layer contents; it must not be used for any deduplication. func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) { - // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and - // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil. + // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, + // nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set + // by NoteOriginalOCIConfig and are not being updated any more; + // or PutManifest has been called and s.manifest != nil. // Either way this function does not need the protection of s.lock.
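The two error shapes above are designed to be told apart with errors.Is (a sentinel value) and errors.As (a typed error carrying the layer index). A small sketch of the dispatch callers are expected to perform; both errors are re-declared locally for illustration:

package main

import (
	"errors"
	"fmt"
)

var errNotYetAvailable = errors.New("DiffID not yet available") // sentinel, matched with errors.Is

type diffIDUnknownError struct{ layerIndex int } // typed, matched with errors.As

func (e diffIDUnknownError) Error() string {
	return fmt.Sprintf("DiffID value for layer %d is unknown or explicitly empty", e.layerIndex)
}

func classify(err error) string {
	var unknown diffIDUnknownError
	switch {
	case err == nil:
		return "DiffID available: enforce it against the pulled layer"
	case errors.Is(err, errNotYetAvailable):
		return "no manifest yet: defer the check (an internal error after PutManifest)"
	case errors.As(err, &unknown):
		return fmt.Sprintf("layer %d has no DiffID: only allow the non-TOC path", unknown.layerIndex)
	default:
		return "hard failure"
	}
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(errNotYetAvailable))
	fmt.Println(classify(diffIDUnknownError{layerIndex: 3}))
}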
- if s.manifest == nil { - return "", nil - } if s.untrustedDiffIDValues == nil { - mt := manifest.GuessMIMEType(s.manifest) - if mt != imgspecv1.MediaTypeImageManifest { - // We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage, - // and then use types.Image.OCIConfig so that we can parse the image. - // - // In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotation), - // while converting to a non-OCI formats, using a manual (skopeo copy) or something similar, not (podman pull). - // So it is not implemented yet. - return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt) + // Typically, we expect untrustedDiffIDValues to be set by the generic copy code + // via NoteOriginalOCIConfig; this is a compatibility fallback for external callers + // of the public types.ImageDestination. + if s.manifest == nil { + return "", errUntrustedLayerDiffIDNotYetAvailable } - man, err := manifest.FromBlob(s.manifest, mt) + + ctx := context.Background() // This is all happening in memory, no need to worry about cancellation. + unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil) + sourced, err := image.FromUnparsedImage(ctx, nil, unparsed) if err != nil { - return "", fmt.Errorf("parsing manifest: %w", err) + return "", fmt.Errorf("parsing image to be committed: %w", err) } - - cb, err := s.getConfigBlob(man.ConfigInfo()) + configOCI, err := sourced.OCIConfig(ctx) if err != nil { - return "", err + return "", fmt.Errorf("obtaining config of image to be committed: %w", err) } - // retrieve the expected uncompressed digest from the config blob. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return "", err - } - s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs) - if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… - s.untrustedDiffIDValues = []digest.Digest{} + s.setUntrustedDiffIDValuesFromOCIConfig(configOCI) + } + + // Let entirely empty / missing diffIDs through; but if the array does exist, expect it to contain an entry for every layer, + // and fail hard on missing entries. This tries to account for completely naive image producers who just don’t fill DiffID, + // while still detecting incorrectly-built / confused images. + // + // schema1 images don’t have DiffID values in the config. + // Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values, so treat arrays of all-empty + // values as “DiffID unknown”. + // For schema 1, it is important to exit here, before the layerIndex >= len(s.untrustedDiffIDValues) + // check, because the format conversion from schema1 to OCI used to compute untrustedDiffIDValues + // changes the number of layers (drops items with Schema1V1Compatibility.ThrowAway). + if !slices.ContainsFunc(s.untrustedDiffIDValues, func(d digest.Digest) bool { + return d != "" + }) { + return "", untrustedLayerDiffIDUnknownError{ + layerIndex: layerIndex, } } if layerIndex >= len(s.untrustedDiffIDValues) { @@ -1124,6 +1403,15 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D return s.untrustedDiffIDValues[layerIndex], nil } +// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDValues from config. +// The caller must ensure s.lock does not need to be held.
+func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) { + s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs) + if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… + s.untrustedDiffIDValues = []digest.Digest{} + } +} + // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before CommitWithOptions() is called @@ -1131,7 +1419,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. - if len(s.manifest) == 0 { + if s.manifest == nil { return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()") } toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx) @@ -1159,7 +1447,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options } } // Find the list of layer blobs. - man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType) if err != nil { return fmt.Errorf("parsing manifest: %w", err) } @@ -1193,29 +1481,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options imgOptions.CreationDate = *inspect.Created } - // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. - dataBlobs := set.New[digest.Digest]() - for blob := range s.lockProtected.filenames { - dataBlobs.Add(blob) - } - for _, layerBlob := range layerBlobs { - dataBlobs.Delete(layerBlob.Digest) - } - for _, blob := range dataBlobs.Values() { - v, err := os.ReadFile(s.lockProtected.filenames[blob]) + // Set up to save the config as a data item. Since we only share layers, the config should be in a file. + if s.lockProtected.configDigest != "" { + v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest]) if err != nil { - return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) + return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err) } imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ - Key: blob.String(), + Key: s.lockProtected.configDigest.String(), Data: v, Digest: digest.Canonical.FromBytes(v), }) } // Set up to save the options.UnparsedToplevel's manifest if it differs from // the per-platform one, which is saved below. 
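Two easy-to-miss invariants above: slices.Clone returns nil for a nil input, so setUntrustedDiffIDValuesFromOCIConfig normalizes that to an empty slice, and an array consisting only of empty strings (the schema1 conversion artifact) counts as "DiffIDs unknown" per the slices.ContainsFunc check. A sketch of both rules, with plain strings standing in for digest.Digest:

package main

import (
	"fmt"
	"slices"
)

// normalize mirrors the nil-to-empty normalization above.
func normalize(diffIDs []string) []string {
	out := slices.Clone(diffIDs)
	if out == nil { // slices.Clone(nil) == nil
		out = []string{}
	}
	return out
}

// usable mirrors the slices.ContainsFunc test: at least one non-empty entry.
func usable(diffIDs []string) bool {
	return slices.ContainsFunc(diffIDs, func(d string) bool { return d != "" })
}

func main() {
	fmt.Println(normalize(nil) != nil)                  // true: never nil afterwards
	fmt.Println(usable([]string{"", "", ""}))           // false: schema1-style all-empty array
	fmt.Println(usable([]string{"sha256:placeholder"})) // true
}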
- if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { + if !bytes.Equal(toplevelManifest, s.manifest) { manifestDigest, err := manifest.Digest(toplevelManifest) if err != nil { return fmt.Errorf("digesting top-level manifest: %w", err) } @@ -1370,6 +1650,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob return err } s.manifest = bytes.Clone(manifestBlob) + if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil + s.manifest = []byte{} + } + s.manifestMIMEType = manifest.GuessMIMEType(s.manifest) s.manifestDigest = digest return nil } @@ -1392,7 +1676,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s if instanceDigest == nil { s.signatures = sigblob s.metadata.SignatureSizes = sizes - if len(s.manifest) > 0 { + if s.manifest != nil { manifestDigest := s.manifestDigest instanceDigest = &manifestDigest } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 7e368857..e7ba1dd2 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -1,5 +1,4 @@ //go:build !containers_image_storage_stub -// +build !containers_image_storage_stub package storage diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index acc4cb30..5e3ff9e6 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -1,5 +1,4 @@ //go:build !containers_image_storage_stub -// +build !containers_image_storage_stub package storage @@ -153,7 +152,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag } if s.id == "" { logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage) + // %.0w makes the error visible to errors.Unwrap() without including any text. + // ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause.
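The new guard in PutManifest above exists because bytes.Clone(nil) returns nil, while this file now uses s.manifest == nil (rather than len(s.manifest) == 0) to mean "PutManifest was never called". A tiny demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var blob []byte // a (hypothetical) caller passing a nil manifest blob
	m := bytes.Clone(blob)
	fmt.Println(m == nil) // true: Clone(nil) returns nil, per its documentation
	if m == nil {
		m = []byte{} // keep "manifest was set" distinguishable from "never set"
	}
	fmt.Println(m == nil) // false
}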
+ return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage) } if loadedImage == nil { img, err := s.transport.store.Image(s.id) diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 55788f88..7cedf1d8 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -1,5 +1,4 @@ //go:build !containers_image_storage_stub -// +build !containers_image_storage_stub package storage @@ -35,13 +34,14 @@ type storageImageSource struct { impl.PropertyMethodsInitialize stubs.NoGetBlobAtInitialize - imageRef storageReference - image *storage.Image - systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - metadata storageImageMetadata - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - getBlobMutexProtected getBlobMutexProtected + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + metadata storageImageMetadata + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + cachedManifestMIMEType string // Valid if cachedManifest != nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + getBlobMutexProtected getBlobMutexProtected } // getBlobMutexProtected contains storageImageSource data protected by getBlobMutex. @@ -247,7 +247,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di } return blob, manifest.GuessMIMEType(blob), err } - if len(s.cachedManifest) == 0 { + if s.cachedManifest == nil { // The manifest is stored as a big data item. // Prefer the manifest corresponding to the user-specified digest, if available. if s.imageRef.named != nil { @@ -267,15 +267,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di } // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). 
- if len(s.cachedManifest) == 0 { + if s.cachedManifest == nil { cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) if err != nil { return nil, "", err } s.cachedManifest = cachedBlob } + s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest) } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err + return s.cachedManifest, s.cachedManifestMIMEType, err } // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index b981953a..a66a1d19 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -1,5 +1,4 @@ //go:build !containers_image_storage_stub -// +build !containers_image_storage_stub package storage @@ -362,15 +361,14 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { } storeSpec := scope[1:closeIndex] scope = scope[closeIndex+1:] - storeInfo := strings.SplitN(storeSpec, "@", 2) - if len(storeInfo) == 1 && storeInfo[0] != "" { - // One component: the graph root. - if !filepath.IsAbs(storeInfo[0]) { + if a, b, ok := strings.Cut(storeSpec, "@"); ok && a != "" && b != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(b) { return ErrPathNotAbsolute } - } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { - // Two components: the driver type and the graph root. - if !filepath.IsAbs(storeInfo[1]) { + } else if !ok && a != "" { + // One component: the graph root. + if !filepath.IsAbs(storeSpec) { return ErrPathNotAbsolute } } else { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 7d4a83bc..283a32d0 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -14,8 +14,9 @@ import ( "github.com/containers/image/v5/internal/imagesource/impl" "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/pkg/compression" + compressionTypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" - "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -82,31 +83,47 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System } } - // Default to assuming the layer is compressed. - layerType := imgspecv1.MediaTypeImageLayerGzip - // Set up to digest the file as it is. blobIDdigester := digest.Canonical.Digester() reader = io.TeeReader(reader, blobIDdigester.Hash()) - // Set up to digest the file after we maybe decompress it. 
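The ValidatePolicyConfigurationScope hunk above swaps strings.SplitN for strings.Cut; when the separator is absent, Cut returns the whole input as the first value with ok == false, which is exactly the one-component case. A small sketch of the same parsing; the store specs are illustrative:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// validStoreSpec accepts either "graph-root" or "driver@graph-root",
// requiring the graph root to be an absolute path, as above.
func validStoreSpec(storeSpec string) bool {
	if driver, root, ok := strings.Cut(storeSpec, "@"); ok && driver != "" && root != "" {
		return filepath.IsAbs(root) // two components: driver and graph root
	} else if !ok && driver != "" {
		return filepath.IsAbs(storeSpec) // one component: the graph root
	}
	return false // empty, or a dangling "@"
}

func main() {
	fmt.Println(validStoreSpec("overlay@/var/lib/containers/storage")) // true
	fmt.Println(validStoreSpec("/var/lib/containers/storage"))         // true
	fmt.Println(validStoreSpec("overlay@relative/path"))               // false
	fmt.Println(validStoreSpec("@/var/lib/containers/storage"))        // false
}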
- diffIDdigester := digest.Canonical.Digester() - uncompressed, err := pgzip.NewReader(reader) - if err == nil { - // It is compressed, so the diffID is the digest of the uncompressed version - reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) - } else { - // It is not compressed, so the diffID and the blobID are going to be the same - diffIDdigester = blobIDdigester - layerType = imgspecv1.MediaTypeImageLayer - uncompressed = nil - } - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - if _, err := io.Copy(io.Discard, reader); err != nil { - return nil, fmt.Errorf("error reading %q: %w", filename, err) - } - if uncompressed != nil { - uncompressed.Close() + var layerType string + var diffIDdigester digest.Digester + // If necessary, digest the file after we decompress it. + if err := func() error { // A scope for defer + format, decompressor, reader, err := compression.DetectCompressionFormat(reader) + if err != nil { + return err + } + if decompressor != nil { + uncompressed, err := decompressor(reader) + if err != nil { + return err + } + defer uncompressed.Close() + // It is compressed, so the diffID is the digest of the uncompressed version + diffIDdigester = digest.Canonical.Digester() + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + switch format.Name() { + case compressionTypes.GzipAlgorithmName: + layerType = imgspecv1.MediaTypeImageLayerGzip + case compressionTypes.ZstdAlgorithmName: + layerType = imgspecv1.MediaTypeImageLayerZstd + default: // This is incorrect, but we have no good options, and it is what this transport was historically doing. + layerType = imgspecv1.MediaTypeImageLayerGzip + } + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + } + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + if _, err := io.Copy(io.Discard, reader); err != nil { + return fmt.Errorf("error reading %q: %w", filename, err) + } + return nil + }(); err != nil { + return nil, err } // Grab our uncompressed and possibly-compressed digests and sizes. diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 0f4b7e32..0823a619 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -19,10 +19,13 @@ import ( _ "github.com/containers/image/v5/sif" _ "github.com/containers/image/v5/tarball" // The docker-daemon transport is registered by docker_daemon*.go - // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go ) +func init() { + transports.Register(transports.NewStubTransport("ostree")) // This transport was completely removed. +} + // ParseImageName converts a URL-like image name to a types.ImageReference. func ParseImageName(imgName string) (types.ImageReference, error) { // Keep this in sync with TransportFromImageName!
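The tarball rewrite above stops assuming gzip: compression.DetectCompressionFormat sniffs the stream and returns the detected algorithm, a decompressor (nil for uncompressed input), and a replacement reader with the sniffed bytes re-attached. A hedged sketch of the media-type mapping using the real c/image packages; the gzip sample is synthetic:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression"
	compressionTypes "github.com/containers/image/v5/pkg/compression/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Build a small gzip-compressed sample in memory.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("layer contents")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	format, decompressor, reader, err := compression.DetectCompressionFormat(&buf)
	if err != nil {
		panic(err)
	}
	layerType := imgspecv1.MediaTypeImageLayer // uncompressed default
	if decompressor != nil {
		switch format.Name() {
		case compressionTypes.GzipAlgorithmName:
			layerType = imgspecv1.MediaTypeImageLayerGzip
		case compressionTypes.ZstdAlgorithmName:
			layerType = imgspecv1.MediaTypeImageLayerZstd
		default: // mirror the transport's historical fallback noted above
			layerType = imgspecv1.MediaTypeImageLayerGzip
		}
	}
	_, _ = io.Copy(io.Discard, reader) // drain, as the transport does
	fmt.Println(layerType)
}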
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go index ffac6e0b..3a8caeeb 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go @@ -1,5 +1,4 @@ //go:build !containers_image_docker_daemon_stub -// +build !containers_image_docker_daemon_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go index ddc347bf..8e470c80 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go @@ -1,5 +1,4 @@ //go:build containers_image_docker_daemon_stub -// +build containers_image_docker_daemon_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go deleted file mode 100644 index 2340702b..00000000 --- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build containers_image_ostree && linux -// +build containers_image_ostree,linux - -package alltransports - -import ( - // Register the ostree transport - _ "github.com/containers/image/v5/ostree" -) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go deleted file mode 100644 index 8c417518..00000000 --- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !containers_image_ostree || !linux -// +build !containers_image_ostree !linux - -package alltransports - -import "github.com/containers/image/v5/transports" - -func init() { - transports.Register(transports.NewStubTransport("ostree")) -} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go index 1e399cdb..4b27e059 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go @@ -1,5 +1,4 @@ //go:build !containers_image_storage_stub -// +build !containers_image_storage_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go index 30802661..b021c1b5 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go @@ -1,5 +1,4 @@ //go:build containers_image_storage_stub -// +build containers_image_storage_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go index 834f33b4..4c9c0889 100644 --- a/vendor/github.com/containers/image/v5/transports/transports.go +++ b/vendor/github.com/containers/image/v5/transports/transports.go @@ -72,7 +72,7 @@ func ImageName(ref types.ImageReference) string { return ref.Transport().Name() + ":" + 
ref.StringWithinTransport() } -var deprecatedTransports = set.NewWithValues("atomic") +var deprecatedTransports = set.NewWithValues("atomic", "ostree") // ListNames returns a list of non deprecated transport names. // Deprecated transports can be used, but are not presented to users. diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 9a7a0da2..a9395178 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -3,6 +3,7 @@ package types import ( "context" "io" + "net/url" "time" "github.com/containers/image/v5/docker/reference" @@ -241,6 +242,7 @@ type BlobInfoCache interface { // // WARNING: Various methods which return an object identified by digest generally do not // validate that the returned data actually matches that digest; this is the caller’s responsibility. +// See the individual methods’ documentation for potentially more details. type ImageSource interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. @@ -251,10 +253,17 @@ type ImageSource interface { // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + // + // WARNING: This is a raw access to the data as provided by the source; if the reference contains a digest, or instanceDigest is set, + // callers must enforce the digest match themselves, typically by using image.UnparsedInstance to access the manifest instead + // of calling this directly. (Compare the generic warning applicable to all of the [ImageSource] interface.) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + // + // WARNING: This is a raw access to the data as provided by the source; callers must validate the contents + // against the blob’s digest themselves. (Compare the generic warning applicable to all of the [ImageSource] interface.) GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. HasThreadSafeGetBlob() bool @@ -650,11 +659,15 @@ type SystemContext struct { // If true, the physical pull source of docker transport images logged as info level DockerLogMirrorChoice bool // Directory to use for OSTree temporary files + // + // Deprecated: The OSTree transport has been removed. OSTreeTmpDirPath string // If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry. // Note that this requires writing blobs to temporary files, and takes more time than the default behavior, // when the digest for a blob is unknown. 
DockerRegistryPushPrecomputeDigests bool + // DockerProxyURL specifies proxy configuration schema (like socks5://username:password@ip:port) + DockerProxyURL *url.URL // === docker/daemon.Transport overrides === // A directory containing a CA certificate (ending with ".crt"), diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 19f708ad..bfff8a34 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 33 + VersionMinor = 36 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 1 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go index 603925df..7ca32fc8 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go @@ -25,7 +25,7 @@ import ( "github.com/containers/ocicrypt/config" "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/utils" - "go.mozilla.org/pkcs7" + "github.com/smallstep/pkcs7" ) type pkcs7KeyWrapper struct { diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 0fed08c3..623ede2d 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-39" + FEDORA_NAME: "fedora-42" DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20241010t105554z-f40f39d13" + IMAGE_SUFFIX: "c20250422t130822z-f42f41d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -126,8 +126,9 @@ lint_task: folder: $GOPATH/pkg/mod build_script: | apt-get update - apt-get install -y libbtrfs-dev + apt-get install -y libbtrfs-dev libsubid-dev test_script: | + [ -n "${CIRRUS_BASE_SHA}" ] && git fetch origin ${CIRRUS_BASE_SHA} # Make ${CIRRUS_BASE_SHA} resolvable for git-validation make TAGS=regex_precompile local-validate make lint make clean @@ -170,13 +171,13 @@ vendor_task: cross_task: alias: cross container: - image: golang:1.22 + image: golang:1.23 build_script: make cross gofix_task: alias: gofix container: - image: golang:1.22 + image: golang:1.23 build_script: go fix ./... test_script: git diff --exit-code diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml index ec11f5da..ad4dec0c 100644 --- a/vendor/github.com/containers/storage/.golangci.yml +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -1,7 +1,20 @@ ---- -run: - concurrency: 6 - timeout: 5m -linters: +version: "2" + +formatters: enable: - gofumpt + +linters: + enable: + - nolintlint + - unconvert + exclusions: + presets: + - comments + - std-error-handling + settings: + staticcheck: + checks: + - all + - -ST1003 # https://staticcheck.dev/docs/checks/#ST1003 Poorly chosen identifier. 
+ - -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression. diff --git a/vendor/github.com/containers/storage/CONTRIBUTING.md b/vendor/github.com/containers/storage/CONTRIBUTING.md index 5364be76..f50eb880 100644 --- a/vendor/github.com/containers/storage/CONTRIBUTING.md +++ b/vendor/github.com/containers/storage/CONTRIBUTING.md @@ -1,123 +1,14 @@ -# Contributing to Containers/Storage +# Contributing to Containers/Storage -We'd love to have you join the community! Below summarizes the processes -that we follow. +We'd love to have you join the community! [Learn how to contribute](https://github.com/containers/common/blob/main/CONTRIBUTING.md) to the Containers Group Projects. -## Topics +Please note that the following information is specific to this project: -* [Reporting Issues](#reporting-issues) -* [Submitting Pull Requests](#submitting-pull-requests) -* [Communications](#communications) - - -## Reporting Issues - -Before reporting an issue, check our backlog of -[open issues](https://github.com/containers/storage/issues) -to see if someone else has already reported it. If so, feel free to add -your scenario, or additional information, to the discussion. Or simply -"subscribe" to it to be notified when it is updated. - -If you find a new issue with the project we'd love to hear about it! The most -important aspect of a bug report is that it includes enough information for -us to reproduce it. So, please include as much detail as possible and try -to remove the extra stuff that doesn't really relate to the issue itself. -The easier it is for us to reproduce it, the faster it'll be fixed! - -Please don't include any private/sensitive information in your issue! - -## Submitting Pull Requests - -No Pull Request (PR) is too small! Typos, additional comments in the code, -new testcases, bug fixes, new features, more documentation, ... it's all -welcome! - -While bug fixes can first be identified via an "issue", that is not required. -It's ok to just open up a PR with the fix, but make sure you include the same -information you would have included in an issue - like how to reproduce it. - -PRs for new features should include some background on what use cases the -new code is trying to address. When possible and when it makes sense, try to break-up -larger PRs into smaller ones - it's easier to review smaller -code changes. But only if those smaller ones make sense as stand-alone PRs. - -Regardless of the type of PR, all PRs should include: -* well documented code changes -* additional testcases. Ideally, they should fail w/o your code change applied -* documentation changes - -Squash your commits into logical pieces of work that might want to be reviewed -separate from the rest of the PRs. But, squashing down to just one commit is ok -too since in the end the entire PR will be reviewed anyway. When in doubt, -squash. - -PRs that fix issues should include a reference like `Closes #XXXX` in the -commit message so that github will automatically close the referenced issue -when the PR is merged. - - - -### Sign your PRs - -The sign-off is a line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. +* We don’t typically require 2 LGTMs for this repository. ## Communications -For general questions, or discussions, please use the +For general questions, or discussions, please use the IRC group on `irc.freenode.net` called `container-projects` that has been setup. @@ -139,6 +30,6 @@ approval, or if the person requests to be removed then it is automatic. Normally, a maintainer will only be removed if they are considered to be inactive for a long period of time or are viewed as disruptive to the community. -The current list of maintainers can be found in the +The current list of maintainers can be found in the [MAINTAINERS](MAINTAINERS) file. --> diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 64c01b67..5da38efd 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de # N/B: This value is managed by Renovate, manual changes are # possible, as long as they don't disturb the formatting # (i.e. DO NOT ADD A 'v' prefix!) 
-GOLANGCI_LINT_VERSION := 1.61.0 +GOLANGCI_LINT_VERSION := 2.2.1 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 43c989b5..bb120e87 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.56.1 +1.59.0 diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go index 396648e7..e8837ff9 100644 --- a/vendor/github.com/containers/storage/check.go +++ b/vendor/github.com/containers/storage/check.go @@ -80,7 +80,7 @@ type CheckOptions struct { // layer to the contents that we'd expect it to have to ignore certain // discrepancies type checkIgnore struct { - ownership, timestamps, permissions bool + ownership, timestamps, permissions, filetype bool } // CheckMost returns a CheckOptions with mostly just "quick" checks enabled. @@ -139,8 +139,10 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) { if strings.Contains(o, "ignore_chown_errors=true") { ignore.ownership = true } - if strings.HasPrefix(o, "force_mask=") { + if strings.Contains(o, "force_mask=") { + ignore.ownership = true ignore.permissions = true + ignore.filetype = true } } for o := range s.pullOptions { @@ -833,7 +835,7 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error { // compareFileInfo returns a string summarizing what's different between the two checkFileInfos func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string { var comparison []string - if a.typeflag != b.typeflag { + if a.typeflag != b.typeflag && !ignore.filetype { comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag)) } if idmap != nil && !idmap.Empty() { diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 143fde29..21341f1e 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -82,7 +82,7 @@ type Container struct { UIDMap []idtools.IDMap `json:"uidmap,omitempty"` GIDMap []idtools.IDMap `json:"gidmap,omitempty"` - Flags map[string]interface{} `json:"flags,omitempty"` + Flags map[string]any `json:"flags,omitempty"` // volatileStore is true if the container is from the volatile json file volatileStore bool `json:"-"` @@ -196,7 +196,7 @@ func (c *Container) MountOpts() []string { switch value := c.Flags[mountOptsFlag].(type) { case []string: return value - case []interface{}: + case []any: var mountOpts []string for _, v := range value { if flag, ok := v.(string); ok { @@ -458,7 +458,7 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) { ids := make(map[string]*Container) - for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { + for locationIndex := range numContainerLocationIndex { location := containerLocationFromIndex(locationIndex) rpath := r.jsonPath[locationIndex] @@ -531,7 +531,7 @@ func (r *containerStore) save(saveLocations containerLocations) error { return err } r.lastWrite = lw - for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ { + for locationIndex := range numContainerLocationIndex { location := containerLocationFromIndex(locationIndex) if location&saveLocations == 0 { continue @@ -641,13 +641,13 @@ func (r *containerStore) 
ClearFlag(id string, flag string) error { } // Requires startWriting. -func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { +func (r *containerStore) SetFlag(id string, flag string, value any) error { container, ok := r.lookup(id) if !ok { return ErrContainerUnknown } if container.Flags == nil { - container.Flags = make(map[string]interface{}) + container.Flags = make(map[string]any) } container.Flags[flag] = value return r.saveFor(container) diff --git a/vendor/github.com/containers/storage/deprecated.go b/vendor/github.com/containers/storage/deprecated.go index 76ae6328..beb82b6b 100644 --- a/vendor/github.com/containers/storage/deprecated.go +++ b/vendor/github.com/containers/storage/deprecated.go @@ -111,7 +111,7 @@ type LayerBigDataStore interface { // Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality. type FlaggableStore interface { ClearFlag(id string, flag string) error - SetFlag(id string, flag string, value interface{}) error + SetFlag(id string, flag string, value any) error } // ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage. @@ -195,8 +195,8 @@ type LayerStore interface { FlaggableStore RWLayerBigDataStore Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) - CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) - Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any) (layer *Layer, err error) + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]any, diff io.Reader) (*Layer, int64, error) SetNames(id string, names []string) error AddNames(id string, names []string) error RemoveNames(id string, names []string) error @@ -207,7 +207,6 @@ type LayerStore interface { Mounted(id string) (int, error) ParentOwners(id string) (uids, gids []int, err error) ApplyDiff(to string, diff io.Reader) (int64, error) - ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) DifferTarget(id string) (string, error) LoadLocked() error PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index a8312811..5925c9da 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -36,6 +36,7 @@ import ( "time" graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" @@ -517,7 +518,7 @@ func (a *Driver) isParent(id, parent string) bool { if parent == "" && len(parents) > 0 { 
return false } - return !(len(parents) > 0 && parent != parents[0]) + return len(parents) == 0 || parent == parents[0] } // Diff produces an archive of the changes between the specified @@ -772,7 +773,23 @@ func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp return fmt.Errorf("aufs doesn't support changing ID mappings") } -// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS -func (a *Driver) SupportsShifting() bool { +// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs to the provided mapping in a userNS +func (a *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool { return false } + +// Dedup performs deduplication of the driver's storage. +func (a *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} + +// DeferredRemove is not implemented. +// It calls Remove directly. +func (a *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { + return nil, a.Remove(id) +} + +// GetTempDirRootDirs is not implemented. +func (a *Driver) GetTempDirRootDirs() []string { + return []string{} +} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index c7f37d97..95bbe8cc 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -30,6 +30,7 @@ import ( "unsafe" graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" @@ -673,3 +674,19 @@ func (d *Driver) ListLayers() ([]string, error) { func (d *Driver) AdditionalImageStores() []string { return nil } + +// Dedup performs deduplication of the driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} + +// DeferredRemove is not implemented. +// It calls Remove directly. +func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { + return nil, d.Remove(id) +} + +// GetTempDirRootDirs is not implemented. 
+func (d *Driver) GetTempDirRootDirs() []string { + return []string{} +} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go index 4f5d8a5b..d8f48764 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/version.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go @@ -1,4 +1,4 @@ -//go:build linux && !btrfs_noversion && cgo +//go:build linux && cgo package btrfs diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go deleted file mode 100644 index 58c1b0d0..00000000 --- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build linux && btrfs_noversion && cgo - -package btrfs - -// TODO(vbatts) remove this work-around once supported linux distros are on -// btrfs utilities of >= 3.16.1 - -func btrfsBuildVersion() string { - return "-" -} - -func btrfsLibVersion() int { - return -1 -} diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index d728e919..df602760 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -131,7 +131,7 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost return ChownPathByMaps(layerFs, toContainer, toHost) } -// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS -func (n *naiveLayerIDMapUpdater) SupportsShifting() bool { +// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs to the provided mapping in a userNS +func (n *naiveLayerIDMapUpdater) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool { return false } diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go index 882fa04b..4f275020 100644 --- a/vendor/github.com/containers/storage/drivers/chown_darwin.go +++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go @@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai } if uid != int(st.Uid) || gid != int(st.Gid) { capability, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %w", os.Args[0], err) } diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 808f4fea..d5a58a50 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -36,8 +36,8 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai } i := inode{ - Dev: uint64(st.Dev), - Ino: uint64(st.Ino), + Dev: uint64(st.Dev), //nolint:unconvert + Ino: st.Ino, } c.mutex.Lock() @@ -101,7 +101,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai } if uid != int(st.Uid) || gid != int(st.Gid) { cap, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && !errors.Is(err, 
system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %w", os.Args[0], err) } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index 8c0bbed6..c94cb5e1 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -40,7 +40,7 @@ const ( ) // CopyRegularToFile copies the content of a file to another -func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint +func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive srcFile, err := os.Open(srcPath) if err != nil { return err @@ -72,7 +72,7 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c } // CopyRegular copies the content of a file to another -func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: revive,golint +func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // If the destination file already exists, we shouldn't blow it away dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) if err != nil { @@ -106,7 +106,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { func copyXattr(srcPath, dstPath, attr string) error { data, err := system.Lgetxattr(srcPath, attr) - if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } if data != nil { @@ -160,7 +160,10 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { switch mode := f.Mode(); { case mode.IsRegular(): - id := fileID{dev: uint64(stat.Dev), ino: stat.Ino} + id := fileID{ + dev: uint64(stat.Dev), //nolint:unconvert + ino: stat.Ino, + } if copyMode == Hardlink { isHardlink = true if err2 := os.Link(srcPath, dstPath); err2 != nil { @@ -242,12 +245,11 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { } // system.Chtimes doesn't support a NOFOLLOW flag atm - // nolint: unconvert if f.IsDir() { dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) } else if !isSymlink { - aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + aTime := time.Unix(stat.Atim.Unix()) + mTime := time.Unix(stat.Mtim.Unix()) if err := system.Chtimes(dstPath, aTime, mTime); err != nil { return err } @@ -279,7 +281,7 @@ func doCopyXattrs(srcPath, dstPath string) error { } xattrs, err := system.Llistxattr(srcPath) - if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go index baaa86dd..191b355d 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go @@ -24,7 +24,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error { } // CopyRegularToFile copies the content of a file to another -func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, 
copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive,golint // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters" +func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters" f, err := os.Open(srcPath) if err != nil { return err @@ -35,6 +35,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c } // CopyRegular copies the content of a file to another -func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive,golint // "func name will be used as copy.CopyRegular by other packages, and that stutters" +func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters" return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath) } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index dc9b12c3..55377808 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -8,6 +8,8 @@ import ( "path/filepath" "strings" + "github.com/containers/storage/internal/dedup" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fileutils" @@ -53,8 +55,8 @@ type MountOpts struct { // Mount label is the MAC Labels to assign to mount point (SELINUX) MountLabel string // UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point - UidMaps []idtools.IDMap //nolint: revive,golint - GidMaps []idtools.IDMap //nolint: revive,golint + UidMaps []idtools.IDMap //nolint: revive + GidMaps []idtools.IDMap //nolint: revive Options []string // Volatile specifies whether the container storage can be optimized @@ -78,7 +80,24 @@ type ApplyDiffOpts struct { type ApplyDiffWithDifferOpts struct { ApplyDiffOpts - Flags map[string]interface{} + Flags map[string]any +} + +// DedupArgs contains the information to perform storage deduplication. +type DedupArgs struct { + // Layers is the list of layers to deduplicate. + Layers []string + + // Options that are passed directly to the pkg/dedup.DedupDirs function. + Options dedup.DedupOptions +} + +// DedupResult contains the result of the Dedup() call. +type DedupResult struct { + // Deduped represents the total number of bytes saved by deduplication. + // This value accounts also for all previously deduplicated data, not only the savings + // from the last run. + Deduped uint64 } // InitFunc initializes the storage driver. @@ -105,7 +124,17 @@ type ProtoDriver interface { // and parent, with contents identical to the specified template layer. CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error // Remove attempts to remove the filesystem layer with this id. + // This is soft-deprecated and should not get any new callers; use DeferredRemove. Remove(id string) error + // DeferredRemove is used to remove the filesystem layer with this id. 
+ // This removal happens immediately (the layer is no longer usable), + // but physically deleting the files may be deferred. + // Caller MUST call the returned Cleanup function EVEN IF the function returns an error. + DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) + // GetTempDirRootDirs returns the root directories for temporary directories. + // Multiple directories may be returned when drivers support different filesystems + // for layers (e.g., overlay with imageStore vs home directory). + GetTempDirRootDirs() []string // Get returns the mountpoint for the layered filesystem referred // to by this id. You can optionally specify a mountLabel or "". // Optionally it gets the mappings used to create the layer. @@ -139,6 +168,8 @@ type ProtoDriver interface { // AdditionalImageStores returns additional image stores supported by the driver // This API is experimental and can be changed without bumping the major version number. AdditionalImageStores() []string + // Dedup performs deduplication of the driver's storage. + Dedup(DedupArgs) (DedupResult, error) } // DiffDriver is the interface to use to implement graph diffs @@ -173,8 +204,9 @@ type LayerIDMapUpdater interface { UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error // SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a - image and it is not required to Chown the files when running in an user namespace. - SupportsShifting() bool + image to the provided mapping and it is not required to Chown the files when running in + a user namespace. + SupportsShifting(uidmap, gidmap []idtools.IDMap) bool } // Driver is the interface for layered/snapshot file system drivers. @@ -196,13 +228,15 @@ type DriverWithDifferOutput struct { CompressedDigest digest.Digest Metadata string BigData map[string][]byte - TarSplit []byte // nil if not available - TOCDigest digest.Digest + // TarSplit is owned by the [DriverWithDifferOutput], and must be closed by calling one of + // [Store.ApplyStagedLayer]/[Store.CleanupStagedLayer]. It is nil if not available. + TarSplit *os.File + TOCDigest digest.Digest // RootDirMode is the mode of the root directory of the layer, if specified. RootDirMode *os.FileMode // Artifacts is a collection of additional artifacts // generated by the differ that the storage driver can use. - Artifacts map[string]interface{} + Artifacts map[string]any } type DifferOutputFormat int @@ -211,8 +245,8 @@ const ( // DifferOutputFormatDir means the output is a directory and it will // keep the original layout. DifferOutputFormatDir = iota - // DifferOutputFormatFlat will store the files by their checksum, in the form - // checksum[0:2]/checksum[2:] + // DifferOutputFormatFlat will store the files by their checksum, per + // pkg/chunked/internal/composefs.RegularFilePathForValidatedDigest. DifferOutputFormatFlat ) @@ -247,6 +281,7 @@ type DifferOptions struct { // This API is experimental and can be changed without bumping the major version number. type Differ interface { ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error) + Close() error } // DriverWithDiffer is the interface for direct diff access. 
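The DeferredRemove contract documented above is easy to get wrong: the returned cleanup function must run even when the removal itself reports an error, and drivers without real deferral (the aufs and btrfs stubs earlier in this patch) return a nil cleanup alongside an immediate Remove, so callers must nil-check it. A hedged caller-side sketch; removeLayer is a hypothetical helper, and it assumes tempdir.CleanupTempDirFunc is a plain func() error, as the stub implementations suggest:

```go
package example

import (
	"errors"
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

// removeLayer stages a layer for deletion via the new DeferredRemove API.
func removeLayer(d graphdriver.ProtoDriver, id string) (retErr error) {
	cleanup, err := d.DeferredRemove(id)
	if cleanup != nil {
		defer func() {
			// Runs on the error path too, as the contract requires.
			if cerr := cleanup(); cerr != nil {
				retErr = errors.Join(retErr, fmt.Errorf("cleaning up temp dir for layer %s: %w", id, cerr))
			}
		}()
	}
	if err != nil {
		return fmt.Errorf("staging removal of layer %s: %w", id, err)
	}
	return nil
}
```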
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 52770174..e398b0aa 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -259,7 +259,7 @@ func supportsIdmappedLowerLayers(home string) (bool, error) { } defer cleanupFunc() - if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { + if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, pid); err != nil { return false, fmt.Errorf("create mapped mount: %w", err) } defer func() { diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go index 7867d500..5cbf5e1c 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check_116.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go @@ -10,7 +10,6 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" ) func scanForMountProgramIndicators(home string) (detected bool, err error) { @@ -28,7 +27,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) { } if d.IsDir() { xattrs, err := system.Llistxattr(path) - if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } for _, xattr := range xattrs { diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go index 1fb3e62a..270425a5 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -1,4 +1,4 @@ -//go:build linux && cgo +//go:build linux package overlay @@ -27,7 +27,7 @@ var ( composeFsHelperErr error // skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel - // does not support it. Mounting directly via a file will be supported in kernel 6.12. + // does not support it. Mounting directly via a file is supported from Linux 6.12. 
skipMountViaFile atomic.Bool ) @@ -42,7 +42,7 @@ func getComposefsBlob(dataDir string) string { return filepath.Join(dataDir, "composefs.blob") } -func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { +func generateComposeFsBlob(verityDigests map[string]string, toc any, composefsDir string) error { if err := os.MkdirAll(composefsDir, 0o700); err != nil { return err } @@ -53,7 +53,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com } destFile := getComposefsBlob(composefsDir) - writerJson, err := getComposeFsHelper() + writerJSON, err := getComposeFsHelper() if err != nil { return fmt.Errorf("failed to find mkcomposefs: %w", err) } @@ -74,7 +74,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com defer outFile.Close() errBuf := &bytes.Buffer{} - cmd := exec.Command(writerJson, "--from-file", "-", "-") + cmd := exec.Command(writerJSON, "--from-file", "-", "-") cmd.Stderr = errBuf cmd.Stdin = dumpReader cmd.Stdout = outFile diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index caf89646..2bc115af 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -22,6 +22,9 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/drivers/overlayutils" "github.com/containers/storage/drivers/quota" + "github.com/containers/storage/internal/dedup" + "github.com/containers/storage/internal/staging_lockfile" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" @@ -29,13 +32,11 @@ import ( "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idmap" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" - "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" @@ -80,10 +81,11 @@ const ( // that mounts do not fail due to length. const ( - linkDir = "l" - stagingDir = "staging" - lowerFile = "lower" - maxDepth = 500 + linkDir = "l" + stagingDir = "staging" + tempDirName = "tempdirs" + lowerFile = "lower" + maxDepth = 500 stagingLockFile = "staging.lock" @@ -130,7 +132,10 @@ type Driver struct { usingMetacopy bool usingComposefs bool - stagingDirsLocks map[string]*lockfile.LockFile + stagingDirsLocksMutex sync.Mutex + // stagingDirsLocks access is not thread safe; callers are required to take + // stagingDirsLocksMutex on each access to guard against concurrent map writes. 
+ stagingDirsLocks map[string]*staging_lockfile.StagingLockFile supportsIDMappedMounts *bool } @@ -219,7 +224,7 @@ func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { return supportsIDMappedMounts, err } -func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) { +func checkAndRecordOverlaySupport(home, runhome string) (bool, error) { var supportsDType bool if os.Geteuid() != 0 { @@ -239,7 +244,7 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str return false, errors.New(overlayCacheText) } } else { - supportsDType, err = supportsOverlay(home, fsMagic, 0, 0) + supportsDType, err = supportsOverlay(home, 0, 0) if err != nil { os.Remove(filepath.Join(home, linkDir)) os.Remove(home) @@ -385,7 +390,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) t := true supportsVolatile = &t } else { - supportsDType, err = checkAndRecordOverlaySupport(fsMagic, home, runhome) + supportsDType, err = checkAndRecordOverlaySupport(home, runhome) if err != nil { return nil, err } @@ -418,7 +423,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if !opts.skipMountHome { if err := mount.MakePrivate(home); err != nil { - return nil, err + return nil, fmt.Errorf("overlay: failed to make mount private: %w", err) } } @@ -428,17 +433,18 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } d := &Driver{ - name: "overlay", - home: home, - imageStore: options.ImageStore, - runhome: runhome, - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)), - supportsDType: supportsDType, - usingMetacopy: usingMetacopy, - supportsVolatile: supportsVolatile, - usingComposefs: opts.useComposefs, - options: *opts, - stagingDirsLocks: make(map[string]*lockfile.LockFile), + name: "overlay", + home: home, + imageStore: options.ImageStore, + runhome: runhome, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)), + supportsDType: supportsDType, + usingMetacopy: usingMetacopy, + supportsVolatile: supportsVolatile, + usingComposefs: opts.useComposefs, + options: *opts, + stagingDirsLocksMutex: sync.Mutex{}, + stagingDirsLocks: make(map[string]*staging_lockfile.StagingLockFile), } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)) @@ -488,7 +494,7 @@ func parseOptions(options []string) (*overlayOptions, error) { if err != nil { return nil, err } - o.quota.Inodes = uint64(inodes) + o.quota.Inodes = inodes case "imagestore", "additionalimagestore": logrus.Debugf("overlay: imagestore=%s", val) // Additional read only image stores to use for lower paths @@ -639,6 +645,8 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { case "true": logrus.Debugf("overlay: storage already configured with a mount-program") return false, nil + case "false": + // Do nothing. 
default: needsMountProgram, err := scanForMountProgramIndicators(home) if err != nil && !os.IsNotExist(err) { @@ -652,7 +660,6 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { } // fall through to check if we find ourselves needing to use a // mount program now - case "false": } for _, dir := range []string{home, runhome} { @@ -661,16 +668,11 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { } } - fsMagic, err := graphdriver.GetFSMagic(home) - if err != nil { - return false, err - } - - supportsDType, _ := checkAndRecordOverlaySupport(fsMagic, home, runhome) + supportsDType, _ := checkAndRecordOverlaySupport(home, runhome) return supportsDType, nil } -func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { +func supportsOverlay(home string, rootUID, rootGID int) (supportsDType bool, err error) { selinuxLabelTest := selinux.PrivContainerMountLabel() logLevel := logrus.ErrorLevel @@ -823,7 +825,7 @@ func (d *Driver) Status() [][2]string { {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())}, {"Using metacopy", strconv.FormatBool(d.usingMetacopy)}, - {"Supports shifting", strconv.FormatBool(d.SupportsShifting())}, + {"Supports shifting", strconv.FormatBool(d.SupportsShifting(nil, nil))}, {"Supports volatile", strconv.FormatBool(supportsVolatile)}, } } @@ -867,10 +869,14 @@ func (d *Driver) Cleanup() error { // pruneStagingDirectories cleans up any staging directory that was leaked. // It returns whether any staging directory is still present. func (d *Driver) pruneStagingDirectories() bool { + d.stagingDirsLocksMutex.Lock() for _, lock := range d.stagingDirsLocks { - lock.Unlock() + if err := lock.UnlockAndDelete(); err != nil { + logrus.Warnf("Failed to unlock and delete staging lock file: %v", err) + } } - d.stagingDirsLocks = make(map[string]*lockfile.LockFile) + clear(d.stagingDirsLocks) + d.stagingDirsLocksMutex.Unlock() anyPresent := false @@ -879,17 +885,15 @@ func (d *Driver) pruneStagingDirectories() bool { if err == nil { for _, dir := range dirs { stagingDirToRemove := filepath.Join(stagingDirBase, dir.Name()) - lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile)) + lock, err := staging_lockfile.TryLockPath(filepath.Join(stagingDirToRemove, stagingLockFile)) if err != nil { anyPresent = true continue } - if err := lock.TryLock(); err != nil { - anyPresent = true - continue - } _ = os.RemoveAll(stagingDirToRemove) - lock.Unlock() + if err := lock.UnlockAndDelete(); err != nil { + logrus.Warnf("Failed to unlock and delete staging lock file: %v", err) + } } } return anyPresent @@ -1096,6 +1100,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl } if d.options.forceMask != nil { + st.Mode |= os.ModeDir if err := idtools.SetContainersOverrideXattr(diff, st); err != nil { return err } @@ -1155,7 +1160,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e if err != nil { return err } - driver.options.quota.Inodes = uint64(inodes) + driver.options.quota.Inodes = inodes default: return fmt.Errorf("unknown option %s", key) } @@ -1302,17 +1307,22 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { + return d.removeCommon(id, system.EnsureRemoveAll) +} + +func (d *Driver) removeCommon(id string, cleanup func(string) error) error { dir := d.dir(id) lid, err := os.ReadFile(path.Join(dir, "link")) if err == nil { - if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + linkPath := path.Join(d.home, linkDir, string(lid)) + if err := cleanup(linkPath); err != nil { logrus.Debugf("Failed to remove link: %v", err) } } d.releaseAdditionalLayerByID(id) - if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + if err := cleanup(dir); err != nil && !os.IsNotExist(err) { return err } if d.quotaCtl != nil { @@ -1324,6 +1334,41 @@ func (d *Driver) Remove(id string) error { return nil } +func (d *Driver) GetTempDirRootDirs() []string { + tempDirs := []string{filepath.Join(d.home, tempDirName)} + // Include imageStore temp directory if it's configured + // Writable layers can only be in d.home or d.imageStore, not in additional image stores + if d.imageStore != "" { + tempDirs = append(tempDirs, filepath.Join(d.imageStore, d.name, tempDirName)) + } + return tempDirs +} + +// Determine the correct temp directory root based on where the layer actually exists. +func (d *Driver) getTempDirRoot(id string) string { + layerDir := d.dir(id) + if d.imageStore != "" { + expectedLayerDir := path.Join(d.imageStore, d.name, id) + if layerDir == expectedLayerDir { + return filepath.Join(d.imageStore, d.name, tempDirName) + } + } + return filepath.Join(d.home, tempDirName) +} + +func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { + tempDirRoot := d.getTempDirRoot(id) + t, err := tempdir.NewTempDir(tempDirRoot) + if err != nil { + return nil, err + } + + if err := d.removeCommon(id, t.StageDeletion); err != nil { + return t.Cleanup, fmt.Errorf("failed to add to stage directory: %w", err) + } + return t.Cleanup, nil +} + // recreateSymlinks goes through the driver's home directory and checks if the diff directory // under each layer has a symlink created for it under the linkDir. If the symlink does not // exist, it creates them @@ -1341,7 +1386,7 @@ func (d *Driver) recreateSymlinks() error { return err } // Keep looping as long as we take some corrective action in each iteration - var errs *multierror.Error + var errs error madeProgress := true iterations := 0 for madeProgress { @@ -1350,14 +1395,14 @@ func (d *Driver) recreateSymlinks() error { // Check that for each layer, there's a link in "l" with the name in // the layer's "link" file that points to the layer's "diff" directory. 
for _, dir := range dirs { - // Skip over the linkDir and anything that is not a directory - if dir.Name() == linkDir || !dir.IsDir() { + // Skip over the linkDir, stagingDir, tempDirName and anything that is not a directory + if dir.Name() == linkDir || dir.Name() == stagingDir || dir.Name() == tempDirName || !dir.IsDir() { continue } // Read the "link" file under each layer to get the name of the symlink data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { - errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err)) + errs = errors.Join(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err)) continue } linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) @@ -1366,12 +1411,12 @@ func (d *Driver) recreateSymlinks() error { err = fileutils.Lexists(linkPath) if err != nil && os.IsNotExist(err) { if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { - errs = multierror.Append(errs, err) + errs = errors.Join(errs, err) continue } madeProgress = true } else if err != nil { - errs = multierror.Append(errs, err) + errs = errors.Join(errs, err) continue } } @@ -1382,7 +1427,7 @@ func (d *Driver) recreateSymlinks() error { // that each symlink we have corresponds to one. links, err := os.ReadDir(linkDirFullPath) if err != nil { - errs = multierror.Append(errs, err) + errs = errors.Join(errs, err) continue } // Go through all of the symlinks in the "l" directory @@ -1390,16 +1435,16 @@ func (d *Driver) recreateSymlinks() error { // Read the symlink's target, which should be "../$layer/diff" target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name())) if err != nil { - errs = multierror.Append(errs, err) + errs = errors.Join(errs, err) continue } targetComponents := strings.Split(target, string(os.PathSeparator)) if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { - errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target)) + errs = errors.Join(errs, fmt.Errorf("link target of %q looks weird: %q", link, target)) // force the link to be recreated on the next pass if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil { if !os.IsNotExist(err) { - errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err)) + errs = errors.Join(errs, fmt.Errorf("removing link %q: %w", link, err)) } // else don’t report any error, but also don’t set madeProgress. continue } @@ -1415,7 +1460,7 @@ func (d *Driver) recreateSymlinks() error { // NOTE: If two or more links point to the same target, we will update linkFile // with every value of link.Name(), and set madeProgress = true every time. 
if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil { - errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) + errs = errors.Join(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) continue } madeProgress = true @@ -1423,14 +1468,11 @@ func (d *Driver) recreateSymlinks() error { } iterations++ if iterations >= maxIterations { - errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + errs = errors.Join(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) break } } - if errs != nil { - return errs.ErrorOrNil() - } - return nil + return errs } // Get creates and mounts the required file system for the given id and returns the mount path. @@ -1478,7 +1520,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO readWrite := !inAdditionalStore - if !d.SupportsShifting() || options.DisableShifting { + if !d.SupportsShifting(options.UidMaps, options.GidMaps) || options.DisableShifting { disableShifting = true } @@ -1546,7 +1588,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO permsKnown := false st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) if err == nil { - perms = os.FileMode(st.Mode()) + perms = st.Mode() permsKnown = true } for err == nil { @@ -1561,7 +1603,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if err != nil { return "", err } - idmappedMountProcessPid = int(pid) + idmappedMountProcessPid = pid defer cleanupFunc() } @@ -1633,7 +1675,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO lower = path.Join(p, d.name, l) if st2, err2 := os.Stat(lower); err2 == nil { if !permsKnown { - perms = os.FileMode(st2.Mode()) + perms = st2.Mode() permsKnown = true } break @@ -1654,7 +1696,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } else { if !permsKnown { - perms = os.FileMode(st.Mode()) + perms = st.Mode() permsKnown = true } lower = newpath @@ -2022,7 +2064,7 @@ func (d *Driver) ListLayers() ([]string, error) { for _, entry := range entries { id := entry.Name() switch id { - case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile: + case linkDir, stagingDir, tempDirName, quota.BackingFsBlockDeviceLink, mountProgramFlagFile: // expected, but not a layer. skip it continue default: @@ -2101,17 +2143,16 @@ func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist) } -func (g *overlayFileGetter) Close() error { - var errs *multierror.Error +func (g *overlayFileGetter) Close() (errs error) { for _, f := range g.composefsMounts { if err := f.Close(); err != nil { - errs = multierror.Append(errs, err) + errs = errors.Join(errs, err) } if err := unix.Rmdir(f.Name()); err != nil { - errs = multierror.Append(errs, err) + errs = errors.Join(errs, err) } } - return errs.ErrorOrNil() + return errs } // newStagingDir creates a new staging directory and returns the path to it. 
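The recreateSymlinks and Close hunks above migrate error aggregation from hashicorp/go-multierror to the standard library's errors.Join (Go 1.20): nil arguments are discarded and the result is nil when nothing failed, so the former ErrorOrNil() step disappears and the collector can be returned directly. A small self-contained sketch of the same pattern (closeAll is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// closeAll runs every closer and joins their failures into one error,
// mirroring how the overlay driver now accumulates errors above.
func closeAll(closers []func() error) (errs error) {
	for _, c := range closers {
		if err := c(); err != nil {
			// errors.Join ignores nil, so no nil-check is needed on errs.
			errs = errors.Join(errs, err)
		}
	}
	return errs // nil when every closer succeeded
}

func main() {
	errs := closeAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("rmdir failed") },
		func() error { return errors.New("close failed") },
	})
	fmt.Println(errs) // prints the two failures, newline-separated
}
```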
@@ -2171,10 +2212,15 @@ func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error) func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { parentStagingDir := filepath.Dir(stagingDirectory) + d.stagingDirsLocksMutex.Lock() if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok { delete(d.stagingDirsLocks, parentStagingDir) - lock.Unlock() + if err := lock.UnlockAndDelete(); err != nil { + d.stagingDirsLocksMutex.Unlock() + return err + } } + d.stagingDirsLocksMutex.Unlock() return os.RemoveAll(parentStagingDir) } @@ -2227,18 +2273,23 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt return graphdriver.DriverWithDifferOutput{}, err } - lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile)) + lock, err := staging_lockfile.TryLockPath(filepath.Join(layerDir, stagingLockFile)) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } defer func() { if errRet != nil { + d.stagingDirsLocksMutex.Lock() delete(d.stagingDirsLocks, layerDir) - lock.Unlock() + d.stagingDirsLocksMutex.Unlock() + if err := lock.UnlockAndDelete(); err != nil { + errRet = errors.Join(errRet, err) + } } }() + d.stagingDirsLocksMutex.Lock() d.stagingDirsLocks[layerDir] = lock - lock.Lock() + d.stagingDirsLocksMutex.Unlock() logrus.Debugf("Applying differ in %s", applyDir) @@ -2264,15 +2315,19 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt } // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. -func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error { +func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) (errRet error) { stagingDirectory := diffOutput.Target parentStagingDir := filepath.Dir(stagingDirectory) defer func() { + d.stagingDirsLocksMutex.Lock() if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok { delete(d.stagingDirsLocks, parentStagingDir) - lock.Unlock() + if err := lock.UnlockAndDelete(); err != nil { + errRet = errors.Join(errRet, err) + } } + d.stagingDirsLocksMutex.Unlock() }() diffPath, err := d.getDiffPath(id) @@ -2493,7 +2548,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp perms = *d.options.forceMask } else { if err == nil { - perms = os.FileMode(st.Mode()) + perms = st.Mode() } } for err == nil { @@ -2541,12 +2596,20 @@ func (d *Driver) supportsIDmappedMounts() bool { return false } -// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS -func (d *Driver) SupportsShifting() bool { +// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs to the provided mapping in a userNS +func (d *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool { if os.Getenv("_CONTAINERS_OVERLAY_DISABLE_IDMAP") == "yes" { return false } if d.options.mountProgram != "" { + // fuse-overlayfs supports only contiguous mappings, since it performs the mapping on the + // upper layer too, to avoid https://github.com/containers/podman/issues/10272 + if !idtools.IsContiguous(uidmap) { + return false + } + if !idtools.IsContiguous(gidmap) { + return false + } return true } return d.supportsIDmappedMounts() } @@ -2740,3 +2803,22 @@ func getMappedMountRoot(path string) string { } return dirName } + +// Dedup performs deduplication of the 
driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + var dirs []string + for _, layer := range req.Layers { + dir, _, inAdditionalStore := d.dir2(layer, false) + if inAdditionalStore { + continue + } + if err := fileutils.Exists(dir); err == nil { + dirs = append(dirs, filepath.Join(dir, "diff")) + } + } + r, err := dedup.DedupDirs(dirs, req.Options) + if err != nil { + return graphdriver.DedupResult{}, err + } + return graphdriver.DedupResult{Deduped: r.Deduped}, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go deleted file mode 100644 index a6df21d0..00000000 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build linux && !cgo - -package overlay - -import ( - "fmt" -) - -func openComposefsMount(dataDir string) (int, error) { - return 0, fmt.Errorf("composefs not supported on this build") -} - -func getComposeFsHelper() (string, error) { - return "", fmt.Errorf("composefs not supported on this build") -} - -func mountComposefsBlob(dataDir, mountPoint string) error { - return fmt.Errorf("composefs not supported on this build") -} - -func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { - return fmt.Errorf("composefs not supported on this build") -} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go index 59ba5b0b..c3b334f7 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go @@ -190,7 +190,8 @@ func NewControl(basePath string) (*Control, error) { } // SetQuota - assign a unique project id to directory and set the quota limits -// for that project id +// for that project id. +// targetPath must exist, must be a directory, and must be empty. func (q *Control) SetQuota(targetPath string, quota Quota) error { var projectID uint32 value, ok := q.quotas.Load(targetPath) @@ -200,10 +201,20 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error { if !ok { projectID = q.nextProjectID + // The directory we are setting an ID on must be empty, as + // the ID will not be propagated to pre-existing subdirectories. 
+ dents, err := os.ReadDir(targetPath) + if err != nil { + return fmt.Errorf("reading directory %s: %w", targetPath, err) + } + if len(dents) > 0 { + return fmt.Errorf("can only set project ID on empty directories, %s is not empty", targetPath) + } + // // assign project id to new container directory // - err := setProjectID(targetPath, projectID) + err = setProjectID(targetPath, projectID) if err != nil { return err } diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go index 6e82a1ea..c4f96282 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -1,4 +1,4 @@ -//go:build !exclude_graphdriver_overlay && linux && cgo +//go:build !exclude_graphdriver_overlay && linux package register diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index f60ec17b..7ad5186a 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -10,6 +10,8 @@ import ( "strings" graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/dedup" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fileutils" @@ -21,7 +23,10 @@ import ( "github.com/vbatts/tar-split/tar/storage" ) -const defaultPerms = os.FileMode(0o555) +const ( + defaultPerms = os.FileMode(0o555) + tempDirName = "tempdirs" +) func init() { graphdriver.MustRegister("vfs", Init) @@ -243,6 +248,42 @@ func (d *Driver) Remove(id string) error { return system.EnsureRemoveAll(d.dir(id)) } +func (d *Driver) GetTempDirRootDirs() []string { + tempDirs := []string{filepath.Join(d.home, tempDirName)} + // Include imageStore temp directory if it's configured + // Writable layers can only be in d.home or d.imageStore, not in additionalHomes (which are read-only) + if d.imageStore != "" { + tempDirs = append(tempDirs, filepath.Join(d.imageStore, d.String(), tempDirName)) + } + return tempDirs +} + +// Determine the correct temp directory root based on where the layer actually exists. +func (d *Driver) getTempDirRoot(id string) string { + layerDir := d.dir(id) + if d.imageStore != "" { + expectedLayerDir := filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id)) + if layerDir == expectedLayerDir { + return filepath.Join(d.imageStore, d.String(), tempDirName) + } + } + return filepath.Join(d.home, tempDirName) +} + +func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { + tempDirRoot := d.getTempDirRoot(id) + t, err := tempdir.NewTempDir(tempDirRoot) + if err != nil { + return nil, err + } + + layerDir := d.dir(id) + if err := t.StageDeletion(layerDir); err != nil { + return t.Cleanup, err + } + return t.Cleanup, nil +} + // Get returns the directory for the given id. 
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { dir := d.dir(id) @@ -311,9 +352,9 @@ func (d *Driver) AdditionalImageStores() []string { return nil } -// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS -func (d *Driver) SupportsShifting() bool { - return d.updater.SupportsShifting() +// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs to the provided mapping in a userNS +func (d *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool { + return d.updater.SupportsShifting(uidmap, gidmap) } // UpdateLayerIDMap updates ID mappings in a layer from matching the ones specified @@ -348,3 +389,19 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) } + +// Dedup performs deduplication of the driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + var dirs []string + for _, layer := range req.Layers { + dir := d.dir2(layer, false) + if err := fileutils.Exists(dir); err == nil { + dirs = append(dirs, dir) + } + } + r, err := dedup.DedupDirs(dirs, req.Options) + if err != nil { + return graphdriver.DedupResult{}, err + } + return graphdriver.DedupResult{Deduped: r.Deduped}, nil +} diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index d38e7453..13bd75cf 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -24,6 +24,7 @@ import ( "github.com/Microsoft/go-winio/backuptar" "github.com/Microsoft/hcsshim" graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fileutils" @@ -975,14 +976,19 @@ func (d *Driver) AdditionalImageStores() []string { return nil } +// Dedup performs deduplication of the driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} + // UpdateLayerIDMap changes ownerships in the layer's filesystem tree from // matching those in toContainer to matching those in toHost. func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { return fmt.Errorf("windows doesn't support changing ID mappings") } -// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS -func (d *Driver) SupportsShifting() bool { +// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs to the provided mapping in a userNS +func (d *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool { return false } @@ -1009,3 +1015,14 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { } return &options, nil } + +// DeferredRemove is not implemented. +// It calls Remove directly. +func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { + return nil, d.Remove(id) +} + +// GetTempDirRootDirs is not implemented. 
+func (d *Driver) GetTempDirRootDirs() []string { + return []string{} +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index ef26c6c7..6be430b3 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -13,6 +13,7 @@ import ( "time" graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" @@ -406,6 +407,12 @@ func (d *Driver) Remove(id string) error { return nil } +// DeferredRemove is not implemented. +// It calls Remove directly. +func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { + return nil, d.Remove(id) +} + // Get returns the mountpoint for the given id after creating the target directories if necessary. func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { mountpoint := d.mountPath(id) @@ -511,3 +518,13 @@ func (d *Driver) ListLayers() ([]string, error) { func (d *Driver) AdditionalImageStores() []string { return nil } + +// Dedup performs deduplication of the driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} + +// GetTempDirRootDirs is not implemented. +func (d *Driver) GetTempDirRootDirs() []string { + return []string{} +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 5c9127ed..74f738cd 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -93,7 +93,7 @@ type Image struct { // ReadOnly is true if this image resides in a read-only layer store. ReadOnly bool `json:"-"` - Flags map[string]interface{} `json:"flags,omitempty"` + Flags map[string]any `json:"flags,omitempty"` } // roImageStore provides bookkeeping for information about Images. @@ -675,7 +675,7 @@ func (r *imageStore) ClearFlag(id string, flag string) error { } // Requires startWriting. 
-func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { +func (r *imageStore) SetFlag(id string, flag string, value any) error { if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } @@ -684,7 +684,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } if image.Flags == nil { - image.Flags = make(map[string]interface{}) + image.Flags = make(map[string]any) } image.Flags[flag] = value return r.Save() diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup.go b/vendor/github.com/containers/storage/internal/dedup/dedup.go new file mode 100644 index 00000000..56d746a3 --- /dev/null +++ b/vendor/github.com/containers/storage/internal/dedup/dedup.go @@ -0,0 +1,163 @@ +package dedup + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "hash/crc64" + "io/fs" + "sync" + + "github.com/opencontainers/selinux/pkg/pwalkdir" + "github.com/sirupsen/logrus" +) + +var errNotSupported = errors.New("reflinks are not supported on this platform") + +const ( + DedupHashInvalid DedupHashMethod = iota + DedupHashCRC + DedupHashFileSize + DedupHashSHA256 +) + +type DedupHashMethod int + +type DedupOptions struct { + // HashMethod is the hash function to use to find identical files + HashMethod DedupHashMethod +} + +type DedupResult struct { + // Deduped represents the total number of bytes saved by deduplication. + // This value accounts also for all previously deduplicated data, not only the savings + // from the last run. + Deduped uint64 +} + +func getFileChecksum(hashMethod DedupHashMethod, path string, info fs.FileInfo) (string, error) { + switch hashMethod { + case DedupHashInvalid: + return "", fmt.Errorf("invalid hash method: %v", hashMethod) + case DedupHashFileSize: + return fmt.Sprintf("%v", info.Size()), nil + case DedupHashSHA256: + return readAllFile(path, info, func(buf []byte) (string, error) { + h := sha256.New() + if _, err := h.Write(buf); err != nil { + return "", err + } + return string(h.Sum(nil)), nil + }) + case DedupHashCRC: + return readAllFile(path, info, func(buf []byte) (string, error) { + c := crc64.New(crc64.MakeTable(crc64.ECMA)) + if _, err := c.Write(buf); err != nil { + return "", err + } + bufRet := make([]byte, 8) + binary.BigEndian.PutUint64(bufRet, c.Sum64()) + return string(bufRet), nil + }) + default: + return "", fmt.Errorf("unknown hash method: %v", hashMethod) + } +} + +type pathsLocked struct { + paths []string + lock sync.Mutex +} + +func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) { + res := DedupResult{} + hashToPaths := make(map[string]*pathsLocked) + lock := sync.Mutex{} // protects `hashToPaths` and `res` + + dedup, err := newDedupFiles() + if err != nil { + return res, err + } + + for _, dir := range dirs { + logrus.Debugf("Deduping directory %s", dir) + if err := pwalkdir.Walk(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.Type().IsRegular() { + return nil + } + info, err := d.Info() + if err != nil { + return err + } + size := uint64(info.Size()) + if size == 0 { + // do not bother with empty files + return nil + } + + // the file was already deduplicated + if visited, err := dedup.isFirstVisitOf(info); err != nil { + return err + } else if visited { + return nil + } + + h, err := getFileChecksum(options.HashMethod, path, info) + if err != nil { + 
return err + } + + lock.Lock() + item, foundItem := hashToPaths[h] + if !foundItem { + item = &pathsLocked{paths: []string{path}} + hashToPaths[h] = item + lock.Unlock() + return nil + } + item.lock.Lock() + lock.Unlock() + + dedupBytes, err := func() (uint64, error) { // function to have a scope for the defer statement + defer item.lock.Unlock() + + var dedupBytes uint64 + for _, src := range item.paths { + deduped, err := dedup.dedup(src, path, info) + if err == nil && deduped > 0 { + logrus.Debugf("Deduped %q -> %q (%d bytes)", src, path, deduped) + dedupBytes += deduped + break + } + logrus.Debugf("Failed to deduplicate: %v", err) + if errors.Is(err, errNotSupported) { + return dedupBytes, err + } + } + if dedupBytes == 0 { + item.paths = append(item.paths, path) + } + return dedupBytes, nil + }() + if err != nil { + return err + } + + lock.Lock() + res.Deduped += dedupBytes + lock.Unlock() + return nil + }); err != nil { + // if reflinks are not supported, return immediately without errors + if errors.Is(err, errNotSupported) { + return res, nil + } + return res, err + } + } + return res, nil +} diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go b/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go new file mode 100644 index 00000000..e13548ff --- /dev/null +++ b/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go @@ -0,0 +1,139 @@ +package dedup + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +type deviceInodePair struct { + dev uint64 + ino uint64 +} + +type dedupFiles struct { + lock sync.Mutex + visitedInodes map[deviceInodePair]struct{} +} + +func newDedupFiles() (*dedupFiles, error) { + return &dedupFiles{ + visitedInodes: make(map[deviceInodePair]struct{}), + }, nil +} + +func (d *dedupFiles) recordInode(dev, ino uint64) (bool, error) { + d.lock.Lock() + defer d.lock.Unlock() + + di := deviceInodePair{ + dev: dev, + ino: ino, + } + + _, visited := d.visitedInodes[di] + d.visitedInodes[di] = struct{}{} + return visited, nil +} + +// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited. 
+func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return false, fmt.Errorf("unable to get raw syscall.Stat_t data") + } + return d.recordInode(uint64(st.Dev), st.Ino) //nolint:unconvert +} + +// dedup deduplicates the file at src path to dst path +func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) { + srcFile, err := os.OpenFile(src, os.O_RDONLY, 0) + if err != nil { + return 0, fmt.Errorf("failed to open source file: %w", err) + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dst, os.O_WRONLY, 0) + if err != nil { + return 0, fmt.Errorf("failed to open destination file: %w", err) + } + defer dstFile.Close() + + stSrc, err := srcFile.Stat() + if err != nil { + return 0, fmt.Errorf("failed to stat source file: %w", err) + } + sSrc, ok := stSrc.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("unable to get raw syscall.Stat_t data") + } + sDest, ok := fiDst.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("unable to get raw syscall.Stat_t data") + } + if sSrc.Dev == sDest.Dev && sSrc.Ino == sDest.Ino { + // same inode, we are dealing with a hard link, no need to deduplicate + return 0, nil + } + + value := unix.FileDedupeRange{ + Src_offset: 0, + Src_length: uint64(stSrc.Size()), + Info: []unix.FileDedupeRangeInfo{ + { + Dest_fd: int64(dstFile.Fd()), + Dest_offset: 0, + }, + }, + } + err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value) + if err == nil { + return value.Info[0].Bytes_deduped, nil + } + + if errors.Is(err, unix.ENOTSUP) { + return 0, errNotSupported + } + return 0, fmt.Errorf("failed to clone file %q: %w", src, err) +} + +func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) { + size := info.Size() + if size == 0 { + return fn(nil) + } + + file, err := os.Open(path) + if err != nil { + return "", err + } + defer file.Close() + + if size < 4096 { + // small file, read it all + data := make([]byte, size) + _, err = io.ReadFull(file, data) + if err != nil { + return "", err + } + return fn(data) + } + + mmap, err := unix.Mmap(int(file.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_PRIVATE) + if err != nil { + return "", fmt.Errorf("failed to mmap file: %w", err) + } + defer func() { + _ = unix.Munmap(mmap) + }() + + _ = unix.Madvise(mmap, unix.MADV_SEQUENTIAL) + + return fn(mmap) +} diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go b/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go new file mode 100644 index 00000000..22ec081c --- /dev/null +++ b/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go @@ -0,0 +1,27 @@ +//go:build !linux + +package dedup + +import ( + "io/fs" +) + +type dedupFiles struct{} + +func newDedupFiles() (*dedupFiles, error) { + return nil, errNotSupported +} + +// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited. 
+func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) { + return false, errNotSupported +} + +// dedup deduplicates the file at src path to dst path +func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) { + return 0, errNotSupported +} + +func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) { + return "", errNotSupported +} diff --git a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock.go b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock.go new file mode 100644 index 00000000..4f340ae3 --- /dev/null +++ b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock.go @@ -0,0 +1,64 @@ +package rawfilelock + +import ( + "os" +) + +type LockType byte + +const ( + ReadLock LockType = iota + WriteLock +) + +type FileHandle = fileHandle + +// OpenLock opens a file for locking +// WARNING: This is the underlying file locking primitive of the OS; +// because closing FileHandle releases the lock, it is not suitable for use +// if there is any chance of two concurrent goroutines attempting to use the same lock. +// Most users should use the higher-level operations from internal/staging_lockfile or pkg/lockfile. +func OpenLock(path string, readOnly bool) (FileHandle, error) { + flags := os.O_CREATE + if readOnly { + flags |= os.O_RDONLY + } else { + flags |= os.O_RDWR + } + + fd, err := openHandle(path, flags) + if err == nil { + return fd, nil + } + + return fd, &os.PathError{Op: "open", Path: path, Err: err} +} + +// TryLockFile attempts to lock a file handle +func TryLockFile(fd FileHandle, lockType LockType) error { + return lockHandle(fd, lockType, true) +} + +// LockFile locks a file handle +func LockFile(fd FileHandle, lockType LockType) error { + return lockHandle(fd, lockType, false) +} + +// UnlockAndCloseHandle unlocks and closes a file handle +func UnlockAndCloseHandle(fd FileHandle) { + unlockAndCloseHandle(fd) +} + +// CloseHandle closes a file handle without unlocking +// +// WARNING: This is a last-resort function for error handling only! +// On Unix systems, closing a file descriptor automatically releases any locks, +// so "closing without unlocking" is impossible. This function will release +// the lock as a side effect of closing the file. +// +// This function should only be used in error paths where the lock state +// is already corrupted or when giving up on lock management entirely. +// Normal code should use UnlockAndCloseHandle instead. 
+func CloseHandle(fd FileHandle) { + closeHandle(fd) +} diff --git a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_unix.go b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_unix.go new file mode 100644 index 00000000..26855407 --- /dev/null +++ b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_unix.go @@ -0,0 +1,49 @@ +//go:build !windows + +package rawfilelock + +import ( + "time" + + "golang.org/x/sys/unix" +) + +type fileHandle uintptr + +func openHandle(path string, mode int) (fileHandle, error) { + mode |= unix.O_CLOEXEC + fd, err := unix.Open(path, mode, 0o644) + return fileHandle(fd), err +} + +func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { + fType := unix.F_RDLCK + if lType != ReadLock { + fType = unix.F_WRLCK + } + lk := unix.Flock_t{ + Type: int16(fType), + Whence: int16(unix.SEEK_SET), + Start: 0, + Len: 0, + } + cmd := unix.F_SETLKW + if nonblocking { + cmd = unix.F_SETLK + } + for { + err := unix.FcntlFlock(uintptr(fd), cmd, &lk) + if err == nil || nonblocking { + return err + } + time.Sleep(10 * time.Millisecond) + } +} + +func unlockAndCloseHandle(fd fileHandle) { + unix.Close(int(fd)) +} + +func closeHandle(fd fileHandle) { + unix.Close(int(fd)) +} diff --git a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_windows.go b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_windows.go new file mode 100644 index 00000000..9c0d692f --- /dev/null +++ b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_windows.go @@ -0,0 +1,48 @@ +//go:build windows + +package rawfilelock + +import ( + "golang.org/x/sys/windows" +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +type fileHandle windows.Handle + +func openHandle(path string, mode int) (fileHandle, error) { + mode |= windows.O_CLOEXEC + fd, err := windows.Open(path, mode, windows.S_IWRITE) + return fileHandle(fd), err +} + +func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { + flags := 0 + if lType != ReadLock { + flags = windows.LOCKFILE_EXCLUSIVE_LOCK + } + if nonblocking { + flags |= windows.LOCKFILE_FAIL_IMMEDIATELY + } + ol := new(windows.Overlapped) + if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { + if nonblocking { + return err + } + panic(err) + } + return nil +} + +func unlockAndCloseHandle(fd fileHandle) { + ol := new(windows.Overlapped) + windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) + closeHandle(fd) +} + +func closeHandle(fd fileHandle) { + windows.Close(windows.Handle(fd)) +} diff --git a/vendor/github.com/containers/storage/internal/staging_lockfile/staging_lockfile.go b/vendor/github.com/containers/storage/internal/staging_lockfile/staging_lockfile.go new file mode 100644 index 00000000..1cb2a332 --- /dev/null +++ b/vendor/github.com/containers/storage/internal/staging_lockfile/staging_lockfile.go @@ -0,0 +1,147 @@ +package staging_lockfile + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/containers/storage/internal/rawfilelock" +) + +// StagingLockFile represents a file lock used to coordinate access to staging areas. +// Typical usage is via CreateAndLock or TryLockPath, both of which return a StagingLockFile +// that must eventually be released with UnlockAndDelete. This ensures that access +// to the staging file is properly synchronized both within and across processes. 
+// +// WARNING: This struct MUST NOT be created manually. Use the provided helper functions instead. +type StagingLockFile struct { + // Locking invariant: If stagingLockFileLock is not locked, a StagingLockFile for a particular + // path exists if the current process currently owns the lock for that file, and it is recorded in stagingLockFiles. + // + // The following fields can only be accessed by the goroutine owning the lock. + // + // An empty string in the file field means that the lock has been released and the StagingLockFile is no longer valid. + file string // Also the key in stagingLockFiles + fd rawfilelock.FileHandle +} + +const maxRetries = 1000 + +var ( + stagingLockFiles map[string]*StagingLockFile + stagingLockFileLock sync.Mutex +) + +// tryAcquireLockForFile attempts to acquire a lock for the specified file path. +func tryAcquireLockForFile(path string) (*StagingLockFile, error) { + cleanPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err) + } + + stagingLockFileLock.Lock() + defer stagingLockFileLock.Unlock() + + if stagingLockFiles == nil { + stagingLockFiles = make(map[string]*StagingLockFile) + } + + if _, ok := stagingLockFiles[cleanPath]; ok { + return nil, fmt.Errorf("lock %q is already in use by another goroutine", cleanPath) + } + + fd, err := rawfilelock.OpenLock(cleanPath, false) + if err != nil { + return nil, err + } + + if err = rawfilelock.TryLockFile(fd, rawfilelock.WriteLock); err != nil { + // Lock acquisition failed, but holding stagingLockFileLock ensures + // no other goroutine in this process could have obtained a lock for this file, + // so closing it is still safe. + rawfilelock.CloseHandle(fd) + return nil, fmt.Errorf("failed to acquire lock on %q: %w", cleanPath, err) + } + + lockFile := &StagingLockFile{ + file: cleanPath, + fd: fd, + } + + stagingLockFiles[cleanPath] = lockFile + return lockFile, nil +} + +// UnlockAndDelete releases the lock and removes the associated file from the filesystem. +// +// WARNING: After this operation, the StagingLockFile becomes invalid for further use. +func (l *StagingLockFile) UnlockAndDelete() error { + stagingLockFileLock.Lock() + defer stagingLockFileLock.Unlock() + + if l.file == "" { + // Panic when unlocking an unlocked lock. That's a violation + // of the lock semantics, and the panic makes such bugs visible. + panic("calling Unlock on unlocked lock") + } + + defer func() { + // It’s important that this happens while we are still holding stagingLockFileLock, to ensure + // that no other goroutine has l.file open, i.e. that this close is not unlocking the lock under any + // other goroutine. (defer ordering is LIFO, so this will happen before we release the stagingLockFileLock) + rawfilelock.UnlockAndCloseHandle(l.fd) + delete(stagingLockFiles, l.file) + l.file = "" + }() + if err := os.Remove(l.file); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// CreateAndLock creates a new temporary file in the specified directory with the given pattern, +// then creates and locks a StagingLockFile for it. The file is created using os.CreateTemp. +// Typically, the caller would use the returned lock file path to derive a path to the lock-controlled resource +// (e.g. by replacing the "pattern" part of the returned file name with a different prefix) +// Caller MUST call UnlockAndDelete() on the returned StagingLockFile to release the lock and delete the file. 
+// +// Returns: +// - The locked StagingLockFile +// - The name of the created lock file +// - Any error that occurred during the process +// +// If the file cannot be locked, this function will retry up to maxRetries times before failing. +func CreateAndLock(dir string, pattern string) (*StagingLockFile, string, error) { + for try := 0; ; try++ { + file, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, "", err + } + file.Close() + + path := file.Name() + l, err := tryAcquireLockForFile(path) + if err != nil { + if try < maxRetries { + continue // Retry if the lock cannot be acquired + } + return nil, "", fmt.Errorf( + "failed to allocate lock in %q after %d attempts; last failure on %q: %w", + dir, try, filepath.Base(path), err, + ) + } + + return l, filepath.Base(path), nil + } +} + +// TryLockPath attempts to acquire a lock on a specific path. If the file does not exist, +// it will be created. +// +// Warning: If acquiring a lock is successful, it returns a new StagingLockFile +// instance for the file. Caller MUST call UnlockAndDelete() on the returned StagingLockFile +// to release the lock and delete the file. +func TryLockPath(path string) (*StagingLockFile, error) { + return tryAcquireLockForFile(path) +} diff --git a/vendor/github.com/containers/storage/internal/tempdir/tempdir.go b/vendor/github.com/containers/storage/internal/tempdir/tempdir.go new file mode 100644 index 00000000..bb6d3561 --- /dev/null +++ b/vendor/github.com/containers/storage/internal/tempdir/tempdir.go @@ -0,0 +1,243 @@ +package tempdir + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/internal/staging_lockfile" + "github.com/sirupsen/logrus" +) + +/* +Locking rules and invariants for TempDir and its recovery mechanism: + +1. TempDir Instance Locks: + - Path: 'RootDir/lock-XYZ' (in the root directory) + - Each TempDir instance creates and holds an exclusive lock on this file immediately + during NewTempDir() initialization. + - This lock signifies that the temporary directory is in active use by the + process/goroutine that holds the TempDir object. + +2. Stale Directory Recovery (separate operation): + - RecoverStaleDirs() can be called independently to identify and clean up stale + temporary directories. + - For each potential stale directory (found by listPotentialStaleDirs), it + attempts to TryLockPath() its instance lock file. + - If TryLockPath() succeeds: The directory is considered stale, and both the + directory and lock file are removed. + - If TryLockPath() fails: The directory is considered in active use by another + process/goroutine, and it's skipped. + +3. TempDir Usage: + - NewTempDir() immediately creates both the instance lock and the temporary directory. + - TempDir.StageDeletion() moves files into the existing temporary directory with counter-based naming. + - Files moved into the temporary directory are renamed with a counter-based prefix + to ensure uniqueness (e.g., "0-filename", "1-filename"). + - Once cleaned up, the TempDir instance cannot be reused - StageDeletion() will return an error. + +4. Cleanup Process: + - TempDir.Cleanup() removes both the temporary directory and its lock file. + - The instance lock is unlocked and deleted after cleanup operations are complete. + - The TempDir instance becomes inactive after cleanup (internal fields are reset). + - The TempDir instance cannot be reused after Cleanup() - StageDeletion() will fail. + +5. 
TempDir Lifetime: + - NewTempDir() creates both the TempDir manager and the actual temporary directory immediately. + - The temporary directory is created eagerly during NewTempDir(). + - During its lifetime, the temporary directory is protected by its instance lock. + - The temporary directory exists until Cleanup() is called, which removes both + the directory and its lock file. + - Multiple TempDir instances can coexist in the same RootDir, each with its own + unique subdirectory and lock. + - After cleanup, the TempDir instance cannot be reused. + +6. Example Directory Structure: + + RootDir/ + lock-ABC (instance lock for temp-dir-ABC) + temp-dir-ABC/ + 0-file1 + 1-file3 + lock-XYZ (instance lock for temp-dir-XYZ) + temp-dir-XYZ/ + 0-file2 +*/ +const ( + // tempDirPrefix is the prefix used for creating temporary directories. + tempDirPrefix = "temp-dir-" + // tempdirLockPrefix is the prefix used for creating lock files for temporary directories. + tempdirLockPrefix = "lock-" +) + +// TempDir represents a temporary directory that is created in a specified root directory. +// It manages the lifecycle of the temporary directory, including creation, locking, and cleanup. +// Each TempDir instance is associated with a unique subdirectory in the root directory. +// Warning: The TempDir instance should be used in a single goroutine. +type TempDir struct { + RootDir string + + tempDirPath string + // tempDirLock is a lock file (e.g., RootDir/lock-XYZ) specific to this + // TempDir instance, indicating it's in active use. + tempDirLock *staging_lockfile.StagingLockFile + tempDirLockPath string + + // counter is used to generate unique filenames for added files. + counter uint64 +} + +// CleanupTempDirFunc is a function type that can be returned by operations +// which need to perform cleanup actions later. +type CleanupTempDirFunc func() error + +// listPotentialStaleDirs scans the RootDir for directories that might be stale temporary directories. +// It identifies directories with the tempDirPrefix and their corresponding lock files with the tempdirLockPrefix. +// The function returns a map of IDs that correspond to both directories and lock files found. +// These IDs are extracted from the filenames by removing their respective prefixes. +func listPotentialStaleDirs(rootDir string) (map[string]struct{}, error) { + ids := make(map[string]struct{}) + + dirContent, err := os.ReadDir(rootDir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, fmt.Errorf("error reading temp dir %s: %w", rootDir, err) + } + + for _, entry := range dirContent { + if id, ok := strings.CutPrefix(entry.Name(), tempDirPrefix); ok { + ids[id] = struct{}{} + continue + } + + if id, ok := strings.CutPrefix(entry.Name(), tempdirLockPrefix); ok { + ids[id] = struct{}{} + } + } + return ids, nil +} + +// RecoverStaleDirs identifies and removes stale temporary directories in the root directory. +// A directory is considered stale if its lock file can be acquired (indicating no active use). +// The function attempts to remove both the directory and its lock file. +// If a directory's lock cannot be acquired, it is considered in use and is skipped. 
+func RecoverStaleDirs(rootDir string) error { + potentialStaleDirs, err := listPotentialStaleDirs(rootDir) + if err != nil { + return fmt.Errorf("error listing potential stale temp dirs in %s: %w", rootDir, err) + } + + if len(potentialStaleDirs) == 0 { + return nil + } + + var recoveryErrors []error + + for id := range potentialStaleDirs { + lockPath := filepath.Join(rootDir, tempdirLockPrefix+id) + tempDirPath := filepath.Join(rootDir, tempDirPrefix+id) + + // Try to lock the lock file. If it can be locked, the directory is stale. + instanceLock, err := staging_lockfile.TryLockPath(lockPath) + if err != nil { + continue + } + + if rmErr := os.RemoveAll(tempDirPath); rmErr != nil && !os.IsNotExist(rmErr) { + recoveryErrors = append(recoveryErrors, fmt.Errorf("error removing stale temp dir %s: %w", tempDirPath, rmErr)) + } + if unlockErr := instanceLock.UnlockAndDelete(); unlockErr != nil { + recoveryErrors = append(recoveryErrors, fmt.Errorf("error unlocking and deleting stale lock file %s: %w", lockPath, unlockErr)) + } + } + + return errors.Join(recoveryErrors...) +} + +// NewTempDir creates a TempDir and immediately creates both the temporary directory +// and its corresponding lock file in the specified RootDir. +// The RootDir itself will be created if it doesn't exist. +// Note: The caller MUST ensure that the returned TempDir instance is cleaned up with .Cleanup(). +func NewTempDir(rootDir string) (*TempDir, error) { + if err := os.MkdirAll(rootDir, 0o700); err != nil { + return nil, fmt.Errorf("creating root temp directory %s failed: %w", rootDir, err) + } + + td := &TempDir{ + RootDir: rootDir, + } + tempDirLock, tempDirLockFileName, err := staging_lockfile.CreateAndLock(td.RootDir, tempdirLockPrefix) + if err != nil { + return nil, fmt.Errorf("creating and locking temp dir instance lock in %s failed: %w", td.RootDir, err) + } + td.tempDirLock = tempDirLock + td.tempDirLockPath = filepath.Join(td.RootDir, tempDirLockFileName) + + // Create the temporary directory that corresponds to the lock file + id := strings.TrimPrefix(tempDirLockFileName, tempdirLockPrefix) + actualTempDirPath := filepath.Join(td.RootDir, tempDirPrefix+id) + if err := os.MkdirAll(actualTempDirPath, 0o700); err != nil { + return nil, fmt.Errorf("creating temp directory %s failed: %w", actualTempDirPath, err) + } + td.tempDirPath = actualTempDirPath + td.counter = 0 + return td, nil +} + +// StageDeletion moves the specified file into the instance's temporary directory. +// The temporary directory must already exist (created during NewTempDir). +// Files are renamed with a counter-based prefix (e.g., "0-filename", "1-filename") to ensure uniqueness. +// Note: 'path' must be on the same filesystem as the TempDir for os.Rename to work. +// The caller MUST ensure .Cleanup() is called. +// If the TempDir has been cleaned up, this method will return an error. +func (td *TempDir) StageDeletion(path string) error { + if td.tempDirLock == nil { + return fmt.Errorf("temp dir instance not initialized or already cleaned up") + } + fileName := fmt.Sprintf("%d-", td.counter) + filepath.Base(path) + destPath := filepath.Join(td.tempDirPath, fileName) + td.counter++ + return os.Rename(path, destPath) +} + +// Cleanup removes the temporary directory and releases its instance lock. +// After cleanup, the TempDir instance becomes inactive and cannot be reused. +// Subsequent calls to StageDeletion() will fail. +// Multiple calls to Cleanup() are safe and will not return an error. 
+// Callers should typically defer Cleanup() to run after any application-level +// global locks are released to avoid holding those locks during potentially +// slow disk I/O. +func (td *TempDir) Cleanup() error { + if td.tempDirLock == nil { + logrus.Debug("Temp dir already cleaned up") + return nil + } + + if err := os.RemoveAll(td.tempDirPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("removing temp dir %s failed: %w", td.tempDirPath, err) + } + + lock := td.tempDirLock + td.tempDirPath = "" + td.tempDirLock = nil + td.tempDirLockPath = "" + return lock.UnlockAndDelete() +} + +// CleanupTemporaryDirectories cleans up multiple temporary directories by calling their cleanup functions. +func CleanupTemporaryDirectories(cleanFuncs ...CleanupTempDirFunc) error { + var cleanupErrors []error + for _, cleanupFunc := range cleanFuncs { + if cleanupFunc == nil { + continue + } + if err := cleanupFunc(); err != nil { + cleanupErrors = append(cleanupErrors, err) + } + } + return errors.Join(cleanupErrors...) +} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 4769e581..4d9e0b88 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "maps" + "math/bits" "os" "path" "path/filepath" @@ -17,6 +18,7 @@ import ( "time" drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" @@ -26,7 +28,6 @@ import ( "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/tarlog" "github.com/containers/storage/pkg/truncindex" - multierror "github.com/hashicorp/go-multierror" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux" @@ -38,6 +39,8 @@ import ( const ( tarSplitSuffix = ".tar-split.gz" + // tempDirPath is the subdirectory name used for storing temporary directories during layer deletion + tempDirPath = "tmp" incompleteFlag = "incomplete" // maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state // in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state) @@ -47,11 +50,13 @@ const ( type layerLocations uint8 -// The backing store is split in two json files, one (the volatile) -// that is written without fsync() meaning it isn't as robust to -// unclean shutdown +// The backing store is split in three json files. +// The volatile store is written without fsync() meaning it isn't as robust to unclean shutdown. +// Optionally, an image store can be configured to store RO layers. +// The stable store is used for the remaining layers that don't go into the other stores. const ( stableLayerLocation layerLocations = 1 << iota + imageStoreLayerLocation volatileLayerLocation numLayerLocationIndex = iota @@ -61,6 +66,10 @@ func layerLocationFromIndex(index int) layerLocations { return 1 << index } +func indexFromLayerLocation(location layerLocations) int { + return bits.TrailingZeros(uint(location)) +} + // A Layer is a record of a copy-on-write layer that's stored by the lower // level graph driver. type Layer struct { @@ -155,7 +164,7 @@ type Layer struct { GIDs []uint32 `json:"gidset,omitempty"` // Flags is arbitrary data about the layer. 
- Flags map[string]interface{} `json:"flags,omitempty"` + Flags map[string]any `json:"flags,omitempty"` // UIDMap and GIDMap are used for setting up a layer's contents // for use inside of a user namespace where UID mapping is being used. @@ -165,8 +174,8 @@ type Layer struct { // ReadOnly is true if this layer resides in a read-only layer store. ReadOnly bool `json:"-"` - // volatileStore is true if the container is from the volatile json file - volatileStore bool `json:"-"` + // location is the location of the store where the layer is present. + location layerLocations `json:"-"` // BigDataNames is a list of names of data items that we keep for the // convenience of the caller. They can be large, and are only in @@ -284,8 +293,14 @@ type rwLayerStore interface { // updateNames modifies names associated with a layer based on (op, names). updateNames(id string, names []string, op updateNameOperation) error - // Delete deletes a layer with the specified name or ID. - Delete(id string) error + // deleteWhileHoldingLock deletes a layer with the specified name or ID. + deleteWhileHoldingLock(id string) error + + // deferredDelete deletes a layer with the specified name or ID. + // This removal happens immediately (the layer is no longer usable), + // but physically deleting the files may be deferred. + // Caller MUST call all returned cleanup functions outside of the locks. + deferredDelete(id string) ([]tempdir.CleanupTempDirFunc, error) // Wipe deletes all layers. Wipe() error @@ -336,6 +351,9 @@ type rwLayerStore interface { // Clean up unreferenced layers GarbageCollect() error + + // Dedup deduplicates layers in the store. + dedup(drivers.DedupArgs) (drivers.DedupResult, error) } type multipleLockFile struct { @@ -428,14 +446,6 @@ type layerStore struct { driver drivers.Driver } -// The caller must hold r.inProcessLock for reading. -func layerLocation(l *Layer) layerLocations { - if l.volatileStore { - return volatileLayerLocation - } - return stableLayerLocation -} - func copyLayer(l *Layer) *Layer { return &Layer{ ID: l.ID, @@ -453,7 +463,7 @@ func copyLayer(l *Layer) *Layer { TOCDigest: l.TOCDigest, CompressionType: l.CompressionType, ReadOnly: l.ReadOnly, - volatileStore: l.volatileStore, + location: l.location, BigDataNames: copySlicePreferringNil(l.BigDataNames), Flags: copyMapPreferringNil(l.Flags), UIDMap: copySlicePreferringNil(l.UIDMap), @@ -655,8 +665,12 @@ func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) { // If the layers.json file or container-layers.json has been // modified manually, then we have to reload the storage in // any case. 
- for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { - info, err := os.Stat(r.jsonPath[locationIndex]) + for locationIndex := range numLayerLocationIndex { + rpath := r.jsonPath[locationIndex] + if rpath == "" { + continue + } + info, err := os.Stat(rpath) if err != nil && !os.IsNotExist(err) { return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err) } @@ -789,9 +803,23 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { layers := []*Layer{} ids := make(map[string]*Layer) - for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { + if r.lockfile.IsReadWrite() { + if err := tempdir.RecoverStaleDirs(filepath.Join(r.layerdir, tempDirPath)); err != nil { + return false, err + } + for _, driverTempDirPath := range r.driver.GetTempDirRootDirs() { + if err := tempdir.RecoverStaleDirs(driverTempDirPath); err != nil { + return false, err + } + } + } + + for locationIndex := range numLayerLocationIndex { location := layerLocationFromIndex(locationIndex) rpath := r.jsonPath[locationIndex] + if rpath == "" { + continue + } info, err := os.Stat(rpath) if err != nil { if !os.IsNotExist(err) { @@ -818,9 +846,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { continue // skip invalid duplicated layer } // Remember where the layer came from - if location == volatileLayerLocation { - layer.volatileStore = true - } + layer.location = location layers = append(layers, layer) ids[layer.ID] = layer } @@ -841,7 +867,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { if conflict, ok := names[name]; ok { r.removeName(conflict, name) errorToResolveBySaving = ErrDuplicateLayerNames - modifiedLocations |= layerLocation(conflict) + modifiedLocations |= conflict.location } names[name] = layers[n] } @@ -916,7 +942,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { var layersToDelete []*Layer for _, layer := range r.layers { if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) + layer.Flags = make(map[string]any) } if layerHasIncompleteFlag(layer) { // Important: Do not call r.deleteInternal() here. It modifies r.layers @@ -929,15 +955,20 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { // Now actually delete the layers for _, layer := range layersToDelete { logrus.Warnf("Found incomplete layer %q, deleting it", layer.ID) - err := r.deleteInternal(layer.ID) + cleanFunctions, err := r.internalDelete(layer.ID) + defer func() { + if err := tempdir.CleanupTemporaryDirectories(cleanFunctions...); err != nil { + logrus.Errorf("Error cleaning up temporary directories: %v", err) + } + }() if err != nil { // Don't return the error immediately, because deleteInternal does not saveLayers(); // Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully // deleted incomplete layers have their metadata correctly removed. - incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors, + incompleteDeletionErrors = errors.Join(incompleteDeletionErrors, fmt.Errorf("deleting layer %#v: %w", layer.ID, err)) } - modifiedLocations |= layerLocation(layer) + modifiedLocations |= layer.location } if err := r.saveLayers(modifiedLocations); err != nil { return false, err @@ -1006,7 +1037,7 @@ func (r *layerStore) save(saveLocations layerLocations) error { // The caller must hold r.lockfile locked for writing. // The caller must hold r.inProcessLock for WRITING. 
func (r *layerStore) saveFor(modifiedLayer *Layer) error { - return r.save(layerLocation(modifiedLayer)) + return r.save(modifiedLayer.location) } // The caller must hold r.lockfile locked for writing. @@ -1025,18 +1056,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error { } r.lastWrite = lw - for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ { + for locationIndex := range numLayerLocationIndex { location := layerLocationFromIndex(locationIndex) if location&saveLocations == 0 { continue } rpath := r.jsonPath[locationIndex] + if rpath == "" { + return fmt.Errorf("internal error: no path for location %v", location) + } if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil { return err } subsetLayers := make([]*Layer, 0, len(r.layers)) for _, layer := range r.layers { - if layerLocation(layer) == location { + if layer.location == location { subsetLayers = append(subsetLayers, layer) } } @@ -1136,12 +1170,17 @@ func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers. if transient { volatileDir = rundir } + layersImageDir := "" + if imagedir != "" { + layersImageDir = filepath.Join(imagedir, "layers.json") + } rlstore := layerStore{ lockfile: newMultipleLockFile(lockFiles...), mountsLockfile: mountsLockfile, rundir: rundir, jsonPath: [numLayerLocationIndex]string{ filepath.Join(layerdir, "layers.json"), + layersImageDir, filepath.Join(volatileDir, "volatile-layers.json"), }, layerdir: layerdir, @@ -1179,6 +1218,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL rundir: rundir, jsonPath: [numLayerLocationIndex]string{ filepath.Join(layerdir, "layers.json"), + "", filepath.Join(layerdir, "volatile-layers.json"), }, layerdir: layerdir, @@ -1246,7 +1286,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error { } // Requires startWriting. -func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { +func (r *layerStore) SetFlag(id string, flag string, value any) error { if !r.lockfile.IsReadWrite() { return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } @@ -1255,7 +1295,7 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { return ErrLayerUnknown } if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) + layer.Flags = make(map[string]any) } layer.Flags[flag] = value return r.saveFor(layer) @@ -1319,7 +1359,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID) } if err := r.saveFor(layer); err != nil { - if e := r.Delete(layer.ID); e != nil { + if e := r.deleteWhileHoldingLock(layer.ID); e != nil { logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e) } return nil, err @@ -1327,6 +1367,17 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s return copyLayer(layer), nil } +func (r *layerStore) pickStoreLocation(volatile, writeable bool) layerLocations { + switch { + case volatile: + return volatileLayerLocation + case !writeable && r.jsonPath[indexFromLayerLocation(imageStoreLayerLocation)] != "": + return imageStoreLayerLocation + default: + return stableLayerLocation + } +} + // Requires startWriting. 
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) { if moreOptions == nil { @@ -1419,7 +1470,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount UIDMap: copySlicePreferringNil(moreOptions.UIDMap), GIDMap: copySlicePreferringNil(moreOptions.GIDMap), BigDataNames: []string{}, - volatileStore: moreOptions.Volatile, + location: r.pickStoreLocation(moreOptions.Volatile, writeable), } layer.Flags[incompleteFlag] = true @@ -1443,7 +1494,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount if cleanupFailureContext == "" { cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site" } - if e := r.Delete(id); e != nil { + if e := r.deleteWhileHoldingLock(id); e != nil { logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, id, e) } } @@ -1608,7 +1659,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) options.MountLabel = layer.MountLabel } - if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() { + if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting(options.UidMaps, options.GidMaps) { if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) { return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID) } @@ -1894,31 +1945,47 @@ func layerHasIncompleteFlag(layer *Layer) bool { } // Requires startWriting. -func (r *layerStore) deleteInternal(id string) error { +// Caller MUST run all returned cleanup functions after this, EVEN IF the function returns an error. +// Ideally outside of the startWriting. +func (r *layerStore) internalDelete(id string) ([]tempdir.CleanupTempDirFunc, error) { if !r.lockfile.IsReadWrite() { - return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) + return nil, fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { - return ErrLayerUnknown + return nil, ErrLayerUnknown } // Ensure that if we are interrupted, the layer will be cleaned up. if !layerHasIncompleteFlag(layer) { if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) + layer.Flags = make(map[string]any) } layer.Flags[incompleteFlag] = true if err := r.saveFor(layer); err != nil { - return err + return nil, err } } // We never unset incompleteFlag; below, we remove the entire object from r.layers. 
+	tempDirectory, err := tempdir.NewTempDir(filepath.Join(r.layerdir, tempDirPath)) + if err != nil { + return nil, err + } + cleanFunctions := []tempdir.CleanupTempDirFunc{tempDirectory.Cleanup} id = layer.ID - if err := r.driver.Remove(id); err != nil && !errors.Is(err, os.ErrNotExist) { - return err + cleanFunc, err := r.driver.DeferredRemove(id) + cleanFunctions = append(cleanFunctions, cleanFunc) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return cleanFunctions, err + } + + if err := tempDirectory.StageDeletion(r.tspath(id)); err != nil && !errors.Is(err, os.ErrNotExist) { + return cleanFunctions, err + } + if err := tempDirectory.StageDeletion(r.datadir(id)); err != nil && !errors.Is(err, os.ErrNotExist) { + return cleanFunctions, err } - os.Remove(r.tspath(id)) - os.RemoveAll(r.datadir(id)) delete(r.byid, id) for _, name := range layer.Names { delete(r.byname, name) @@ -1942,7 +2009,7 @@ }) { selinux.ReleaseLabel(mountLabel) } - return nil + return cleanFunctions, nil } // Requires startWriting. @@ -1962,10 +2029,20 @@ func (r *layerStore) deleteInDigestMap(id string) { } // Requires startWriting. -func (r *layerStore) Delete(id string) error { +// This is soft-deprecated and should not have any new callers; use deferredDelete instead. +func (r *layerStore) deleteWhileHoldingLock(id string) error { + cleanupFunctions, deferErr := r.deferredDelete(id) + cleanupErr := tempdir.CleanupTemporaryDirectories(cleanupFunctions...) + return errors.Join(deferErr, cleanupErr) +} + +// Requires startWriting. +// Caller MUST run all returned cleanup functions after this, EVEN IF the function returns an error. +// Ideally outside of the startWriting. +func (r *layerStore) deferredDelete(id string) ([]tempdir.CleanupTempDirFunc, error) { layer, ok := r.lookup(id) if !ok { - return ErrLayerUnknown + return nil, ErrLayerUnknown } id = layer.ID // The layer may already have been explicitly unmounted, but if not, we @@ -1977,13 +2054,14 @@ func (r *layerStore) Delete(id string) error { break } if err != nil { - return err + return nil, err } } - if err := r.deleteInternal(id); err != nil { - return err + cleanFunctions, err := r.internalDelete(id) + if err != nil { + return cleanFunctions, err } - return r.saveFor(layer) + return cleanFunctions, r.saveFor(layer) } // Requires startReading or startWriting. @@ -2013,7 +2091,7 @@ func (r *layerStore) Wipe() error { return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created) }) for _, id := range ids { - if err := r.Delete(id); err != nil { + if err := r.deleteWhileHoldingLock(id); err != nil { return err } } @@ -2253,33 +2331,33 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, // but they modify in-memory state. 
fgetter, err := r.newFileGetter(to) if err != nil { - errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err)) + errs := fmt.Errorf("creating file-getter: %w", err) if err := decompressor.Close(); err != nil { - errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err)) + errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err)) } if err := tsfile.Close(); err != nil { - errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err)) + errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err)) } - return nil, errs.ErrorOrNil() + return nil, errs } tarstream := asm.NewOutputTarStream(fgetter, metadata) rc := ioutils.NewReadCloserWrapper(tarstream, func() error { - var errs *multierror.Error + var errs error if err := decompressor.Close(); err != nil { - errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err)) + errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err)) } if err := tsfile.Close(); err != nil { - errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err)) + errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err)) } if err := tarstream.Close(); err != nil { - errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err)) + errs = errors.Join(errs, fmt.Errorf("closing reconstructed tarstream: %w", err)) } if err := fgetter.Close(); err != nil { - errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err)) + errs = errors.Join(errs, fmt.Errorf("closing file-getter: %w", err)) } if errs != nil { - return errs.ErrorOrNil() + return errs } return nil }) @@ -2449,16 +2527,12 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, for uid := range uidLog { layer.UIDs = append(layer.UIDs, uid) } - sort.Slice(layer.UIDs, func(i, j int) bool { - return layer.UIDs[i] < layer.UIDs[j] - }) + slices.Sort(layer.UIDs) layer.GIDs = make([]uint32, 0, len(gidLog)) for gid := range gidLog { layer.GIDs = append(layer.GIDs, gid) } - sort.Slice(layer.GIDs, func(i, j int) bool { - return layer.GIDs[i] < layer.GIDs[j] - }) + slices.Sort(layer.GIDs) err = r.saveFor(layer) @@ -2514,7 +2588,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver layer.Metadata = diffOutput.Metadata if options != nil && options.Flags != nil { if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) + layer.Flags = make(map[string]any) } maps.Copy(layer.Flags, options.Flags) } @@ -2528,10 +2602,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver if err != nil { compressor = pgzip.NewWriter(&tsdata) } + if _, err := diffOutput.TarSplit.Seek(0, io.SeekStart); err != nil { + return err + } + if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) } - if _, err := compressor.Write(diffOutput.TarSplit); err != nil { + if _, err := diffOutput.TarSplit.WriteTo(compressor); err != nil { compressor.Close() return err } @@ -2545,7 +2623,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver } for k, v := range diffOutput.BigData { if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { - if err2 := r.Delete(id); err2 != nil { + if err2 := r.deleteWhileHoldingLock(id); err2 != nil { logrus.Errorf("While recovering from a failure to set big data, error 
deleting layer %#v: %v", id, err2) } return err @@ -2601,6 +2679,11 @@ func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) { return r.layersByDigestMap(r.bytocsum, d) } +// Requires startWriting. +func (r *layerStore) dedup(req drivers.DedupArgs) (drivers.DedupResult, error) { + return r.driver.Dedup(req) +} + func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 13a45895..0de67266 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -16,6 +16,7 @@ import ( "strings" "sync" "syscall" + "time" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" @@ -52,7 +53,7 @@ type ( // This is additional data to be used by the converter. It will // not survive a round trip through JSON, so it's primarily // intended for generating archives (i.e., converting writes). - WhiteoutData interface{} + WhiteoutData any // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool @@ -67,6 +68,8 @@ type ( CopyPass bool // ForceMask, if set, indicates the permission mask used for created files. ForceMask *os.FileMode + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time } ) @@ -80,7 +83,7 @@ const ( freebsd = "freebsd" ) -var xattrsToIgnore = map[string]interface{}{ +var xattrsToIgnore = map[string]any{ "security.selinux": true, } @@ -178,6 +181,7 @@ func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) { defer func() { if Err != nil { + // In the normal case, the buffer is embedded in the ReadCloser return. p.Put(buf) } }() @@ -374,7 +378,7 @@ type nosysFileInfo struct { os.FileInfo } -func (fi nosysFileInfo) Sys() interface{} { +func (fi nosysFileInfo) Sys() any { // A Sys value of type *tar.Header is safe as it is system-independent. // The tar.FileInfoHeader function copies the fields into the returned // header without performing any OS lookups. 
@@ -427,7 +431,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error { } for _, xattr := range []string{"security.capability", "security.ima"} { capability, err := system.Lgetxattr(path, xattr) - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err) } if capability != nil { @@ -440,7 +444,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error { // readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header func readUserXattrToTarHeader(path string, hdr *tar.Header) error { xattrs, err := system.Llistxattr(path) - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { return err } for _, key := range xattrs { @@ -474,7 +478,7 @@ type TarWhiteoutConverter interface { ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error) } -type tarAppender struct { +type tarWriter struct { TarWriter *tar.Writer Buffer *bufio.Writer @@ -493,15 +497,19 @@ type tarAppender struct { // from the traditional behavior/format to get features like subsecond // precision in timestamps. CopyPass bool + + // Timestamp, if set, will be set in each header as create/mod/access time + Timestamp *time.Time } -func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { - return &tarAppender{ +func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter { + return &tarWriter{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), IDMappings: idMapping, ChownOpts: chownOpts, + Timestamp: timestamp, } } @@ -520,11 +528,29 @@ func canonicalTarName(name string, isDir bool) (string, error) { return name, nil } -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { +type addFileData struct { + // The path from which to read contents. + path string + + // os.Stat for the above. + fi os.FileInfo + + // The file header of the above. + hdr *tar.Header + + // if present, an extra whiteout entry to write after the header. + extraWhiteout *tar.Header +} + +// prepareAddFile generates the tar file header(s) for adding a file +// from path as name to the tar archive, without writing to the +// tar stream. Thus, any error may be ignored without corrupting the +// tar file. A (nil, nil) return means that the file should be +// ignored for non-error reasons. 
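Before the function itself, a sketch of how the prepare/commit split is meant to be consumed (the loop is illustrative; prepareAddFile and addFile are the functions from this hunk). A prepare failure leaves the stream untouched and the entry can simply be skipped, while a failed addFile may have half-written the stream and must abort the archive:

// Illustrative; entries is a hypothetical []struct{ path, name string }.
for _, e := range entries {
	headers, err := ta.prepareAddFile(e.path, e.name)
	if err != nil {
		// Nothing was written; the tar stream is still valid.
		logrus.Errorf("Can't add file %s to tar: %s; skipping", e.path, err)
		continue
	}
	if headers == nil {
		continue // non-error skip, e.g. sockets
	}
	if err := ta.addFile(headers); err != nil {
		return err // the stream may now be corrupt; stop writing
	}
}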
+func (ta *tarWriter) prepareAddFile(path, name string) (*addFileData, error) { fi, err := os.Lstat(path) if err != nil { - return err + return nil, err } var link string @@ -532,26 +558,26 @@ func (ta *tarAppender) addTarFile(path, name string) error { var err error link, err = os.Readlink(path) if err != nil { - return err + return nil, err } } if fi.Mode()&os.ModeSocket != 0 { logrus.Infof("archive: skipping %q since it is a socket", path) - return nil + return nil, nil } hdr, err := FileInfoHeader(name, fi, link) if err != nil { - return err + return nil, err } if err := readSecurityXattrToTarHeader(path, hdr); err != nil { - return err + return nil, err } if err := readUserXattrToTarHeader(path, hdr); err != nil { - return err + return nil, err } if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { - return err + return nil, err } if ta.CopyPass { copyPassHeader(hdr) @@ -560,18 +586,13 @@ func (ta *tarAppender) addTarFile(path, name string) error { // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } + inode := getInodeFromStat(fi.Sys()) // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name + hdr.Size = 0 // This must be here for the writer math to add up! } } @@ -581,11 +602,11 @@ func (ta *tarAppender) addTarFile(path, name string) error { if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { - return err + return nil, err } hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) if err != nil { - return err + return nil, err } } @@ -599,28 +620,57 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Gname = "" } + // if override timestamp set, replace all times with this + if ta.Timestamp != nil { + hdr.ModTime = *ta.Timestamp + hdr.AccessTime = *ta.Timestamp + hdr.ChangeTime = *ta.Timestamp + } + maybeTruncateHeaderModTime(hdr) + result := &addFileData{ + path: path, + hdr: hdr, + fi: fi, + } if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + // The WhiteoutConverter suggests a generic mechanism, + // but this code is only used to convert between + // overlayfs (on-disk) and AUFS (in the tar file) + // whiteouts, and is initiated because the overlayfs + // storage driver returns OverlayWhiteoutFormat from + // Driver.getWhiteoutFormat(). + // + // For AUFS, a directory with all its contents deleted + // should be represented as a directory containing a + // magic whiteout empty regular file, hence the + // extraWhiteout header returned here. + result.extraWhiteout, err = ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { - return err + return nil, err } + } - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. 
Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo + return result, nil +} + +// addFile performs the write. An error here corrupts the tar file. +func (ta *tarWriter) addFile(headers *addFileData) error { + hdr := headers.hdr + if headers.extraWhiteout != nil { + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // If we write hdr with hdr.Size > 0, we have + // to write the body before we can write the + // extraWhiteout header. This can only happen + // if the contract for WhiteoutConverter is + // not honored, so bail out. + return fmt.Errorf("tar: cannot use extra whiteout with non-empty file %s", hdr.Name) + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err } + hdr = headers.extraWhiteout } if err := ta.TarWriter.WriteHeader(hdr); err != nil { @@ -628,7 +678,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - file, err := os.Open(path) + file, err := os.Open(headers.path) if err != nil { return err } @@ -646,25 +696,37 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } + if !headers.fi.IsDir() && hasHardlinks(headers.fi) { + ta.SeenFiles[getInodeFromStat(headers.fi.Sys())] = headers.hdr.Name + } + return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error { +func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) hdrInfo := hdr.FileInfo() + typeFlag := hdr.Typeflag mask := hdrInfo.Mode() + + // update also the implementation of ForceMask in pkg/chunked if forceMask != nil { mask = *forceMask + // If we have a forceMask, force the real type to either be a directory, + // a link, or a regular file. + if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink { + typeFlag = tar.TypeReg + } } - switch hdr.Typeflag { + switch typeFlag { case tar.TypeDir: // Create directory unless it exists as a directory already. 
// In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if fi, err := os.Lstat(path); err != nil || !fi.IsDir() { if err := os.Mkdir(path, mask); err != nil { return err } @@ -682,7 +744,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L file.Close() return err } - file.Close() + if err := file.Close(); err != nil { + return err + } case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns @@ -728,16 +792,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } - if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { - value := idtools.Stat{ - IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}, - Mode: hdrInfo.Mode() & 0o7777, - } - if err := idtools.SetContainersOverrideXattr(path, value); err != nil { - return err - } - } - // Lchown is not supported on Windows. if Lchown && runtime.GOOS != windows { if chownOpts == nil { @@ -793,18 +847,30 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L continue } if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil { - if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. We also ignore EPERM errors if we are running in a - // user namespace. + if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { + // Ignore specific error cases: + // - ENOTSUP: Expected for graphdrivers lacking extended attribute support: + // - Legacy AUFS versions + // - FreeBSD with unsupported namespaces (trusted, security) + // - EPERM: Expected when operating within a user namespace + // All other errors will cause a failure. errs = append(errs, err.Error()) continue } return err } + } + if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") { + value := idtools.Stat{ + IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}, + Mode: hdrInfo.Mode(), + Major: int(hdr.Devmajor), + Minor: int(hdr.Devminor), + } + if err := idtools.SetContainersOverrideXattr(path, value); err != nil { + return err + } } // We defer setting flags on directories until the end of @@ -826,183 +892,194 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } // Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. +// stream of bytes. This is a convenience wrapper for [TarWithOptions]. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { +func tarWithOptionsTo(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) { // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. 
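The forceMask handling above, restated as a small sketch (the helper is hypothetical, not from the patch): with a forced mask, everything that is not a directory, symlink, or hard link is extracted as a regular file, and the original type, mode, and ownership are preserved separately through the containers-override xattr instead of on the inode.

func effectiveTypeFlag(typeFlag byte, forceMask *os.FileMode) byte {
	if forceMask == nil {
		return typeFlag
	}
	switch typeFlag {
	case tar.TypeDir, tar.TypeSymlink, tar.TypeLink:
		return typeFlag
	default:
		return tar.TypeReg // devices, FIFOs, etc. become plain files
	}
}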
srcPath = fixVolumePathPrefix(srcPath) + defer func() { + if err := dest.Close(); err != nil && result == nil { + result = err + } + }() pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { - return nil, err + return err } - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) + compressWriter, err := CompressStream(dest, options.Compression) if err != nil { - return nil, err + return err } - go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) - ta.CopyPass = options.CopyPass - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() + ta := newTarWriter( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + options.Timestamp, + ) + ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) + includeFiles := options.IncludeFiles + defer func() { + if err := compressWriter.Close(); err != nil && result == nil { + result = err + } + }() - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) - stat, err := os.Lstat(srcPath) - if err != nil { - return - } + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } + stat, err := os.Lstat(srcPath) + if err != nil { + return err + } - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
+ if len(includeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") } - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + includeFiles = []string{base} + } - seen := make(map[string]bool) + if len(includeFiles) == 0 { + includeFiles = []string{"."} + } - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] + seen := make(map[string]bool) - walkRoot := getWalkRoot(srcPath, include) - if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } + for _, include := range includeFiles { + rebaseName := options.RebaseNames[include] - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil //nolint: nilerr - } + walkRoot := getWalkRoot(srcPath, include) + if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil //nolint: nilerr + } - skip := false + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - matches, err := pm.IsMatch(relFilePath) - if err != nil { - return fmt.Errorf("matching %s: %w", relFilePath, err) - } - skip = matches + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + matches, err := pm.IsMatch(relFilePath) + if err != nil { + return fmt.Errorf("matching %s: %w", relFilePath, err) } + skip = matches + } - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. - // Its not a dir then so we can just return/skip. - if !d.IsDir() { - return nil - } + // Its not a dir then so we can just return/skip. 
+ if !d.IsDir() { + return nil + } - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } - dirSlash := relFilePath + string(filepath.Separator) + dirSlash := relFilePath + string(filepath.Separator) - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir } - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } - relFilePath = strings.Replace(relFilePath, include, replacement, 1) + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName } - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + headers, err := ta.prepareAddFile(filePath, relFilePath) + if err != nil { + logrus.Errorf("Can't add file %s to tar: %s; skipping", filePath, err) + } else if headers != nil { + if err := ta.addFile(headers); err != nil { + return err } - return nil - }); err != nil { - logrus.Errorf("%s", err) - return } + return nil + }); err != nil { + return err + } + } + return ta.TarWriter.Close() +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +// +// If used on a file system being modified concurrently, +// TarWithOptions will create a valid tar archive, but may leave out +// some files. 
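What the rewrite of TarWithOptions buys is error propagation through the pipe. Sketched in isolation (produceArchive is a hypothetical stand-in for tarWithOptionsTo):

pr, pw := io.Pipe()
go func() {
	err := produceArchive(pw)
	// A nil err closes the pipe cleanly; otherwise the reader's next
	// Read returns err once the buffered data has been drained.
	if pipeErr := pw.CloseWithError(err); pipeErr != nil {
		logrus.Errorf("Can't close pipe writer: %s", pipeErr)
	}
}()
// pr is what the caller receives as the io.ReadCloser result.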
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + pipeReader, pipeWriter := io.Pipe() + go func() { + err := tarWithOptionsTo(pipeWriter, srcPath, options) + if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil { + logrus.Errorf("Can't close pipe writer: %s", pipeErr) } }() @@ -1099,7 +1176,7 @@ loop: continue } - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { if err := os.RemoveAll(path); err != nil { return err } @@ -1126,7 +1203,7 @@ loop: chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } - if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { return err } @@ -1149,11 +1226,11 @@ loop: } if options.ForceMask != nil { - value := idtools.Stat{Mode: 0o755} + value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)} if rootHdr != nil { value.IDs.UID = rootHdr.Uid value.IDs.GID = rootHdr.Gid - value.Mode = os.FileMode(rootHdr.Mode) + value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode) } if err := idtools.SetContainersOverrideXattr(dest, value); err != nil { return err @@ -1190,9 +1267,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp if options == nil { options = &TarOptions{} } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } r := tarArchive if decompress { @@ -1378,8 +1452,8 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id } else if runtime.GOOS == darwin { uid, gid = hdr.Uid, hdr.Gid if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok { - attrs := strings.Split(string(xstat), ":") - if len(attrs) == 3 { + attrs := strings.Split(xstat, ":") + if len(attrs) >= 3 { val, err := strconv.ParseUint(attrs[0], 10, 32) if err != nil { uid = int(val) @@ -1416,7 +1490,7 @@ func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { if _, err := io.Copy(f, src); err != nil { return nil, err } - if _, err := f.Seek(0, 0); err != nil { + if _, err := f.Seek(0, io.SeekStart); err != nil { return nil, err } st, err := f.Stat() diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index b9d718b6..b3245f7f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -16,7 +16,7 @@ func getOverlayOpaqueXattrName() string { return GetOverlayXattrName("opaque") } -func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { +func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter { if format == OverlayWhiteoutFormat { if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { return overlayWhiteoutConverter{rolayers: rolayers} @@ -173,7 +173,7 @@ func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (boo func isWhiteOut(stat os.FileInfo) bool { s := stat.Sys().(*syscall.Stat_t) - return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 //nolint:unconvert } func GetFileOwner(path string) (uint32, uint32, uint32, error) { diff --git 
a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index 56f2086b..d9bcb97b 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -67,7 +67,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) { s, ok := stat.(*syscall.Stat_t) if ok { @@ -82,7 +82,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( return } -func getInodeFromStat(stat interface{}) (inode uint64, err error) { +func getInodeFromStat(stat any) (inode uint64) { s, ok := stat.(*syscall.Stat_t) if ok { @@ -92,7 +92,7 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) { return } -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { +func getFileUIDGID(stat any) (idtools.IDPair, error) { s, ok := stat.(*syscall.Stat_t) if !ok { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index 6db31cf4..e9878286 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -57,7 +57,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( return } -func getInodeFromStat(stat interface{}) (inode uint64, err error) { +func getInodeFromStat(stat interface{}) (inode uint64) { // do nothing. no notion of Inode in stat on Windows return } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 3075c27b..76edac85 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -70,7 +70,7 @@ func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } // files, we handle this by comparing for exact times, *or* same // second count and either a or b having exactly 0 nanoseconds func sameFsTime(a, b time.Time) bool { - return a == b || + return a.Equal(b) || (a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0)) } @@ -270,6 +270,7 @@ type FileInfo struct { capability []byte added bool xattrs map[string]string + target string } // LookUp looks up the file information of a file. 
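The sameFsTime change from == to Equal matters because time.Time equality is stricter than instant equality. A self-contained illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	a := time.Unix(1700000000, 0) // wall-clock time in the local zone
	b := a.In(time.UTC)           // the same instant, different location

	fmt.Println(a == b)     // false: == also compares location and monotonic data
	fmt.Println(a.Equal(b)) // true: Equal compares the instant only
}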
@@ -336,6 +337,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // back mtime if statDifferent(oldStat, oldInfo, newStat, info) || !bytes.Equal(oldChild.capability, newChild.capability) || + oldChild.target != newChild.target || !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { change := Change{ Path: newChild.path(), @@ -390,6 +392,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { name: string(os.PathSeparator), idMappings: idMappings, children: make(map[string]*FileInfo), + target: "", } return root } @@ -449,7 +452,7 @@ func ChangesSize(newDir string, changes []Change) int64 { func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil) // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) @@ -478,8 +481,14 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa } } else { path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { + headers, err := ta.prepareAddFile(path, change.Path[1:]) + if err != nil { logrus.Debugf("Can't add file %s to tar: %s", path, err) + } else if headers != nil { + if err := ta.addFile(headers); err != nil { + writer.CloseWithError(err) + return + } } } } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index dc308120..95284bb8 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -79,6 +79,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { children: make(map[string]*FileInfo), parent: parent, idMappings: root.idMappings, + target: "", } cpath := filepath.Join(dir, path) stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) @@ -87,11 +88,11 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { } info.stat = stat info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } xattrs, err := system.Llistxattr(cpath) - if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } for _, key := range xattrs { @@ -110,6 +111,12 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { info.xattrs[key] = string(value) } } + if fi.Mode()&os.ModeSymlink != 0 { + info.target, err = os.Readlink(cpath) + if err != nil { + return err + } + } parent.children[info.name] = info return nil } @@ -167,14 +174,7 @@ func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { ix1 := 0 ix2 := 0 - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - + for ix1 < len(names1) && ix2 < len(names2) { ni1 := names1[ix1] ni2 := names2[ix2] @@ -297,7 +297,7 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) continue } builder := make([]byte, 0, dirent.Reclen) - for i := 0; i < len(dirent.Name); i++ { + for i := range len(dirent.Name) { if dirent.Name[i] == 0 { break } diff --git 
a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index ceaa8b0b..6f306777 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -31,9 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options == nil { options = &TarOptions{} } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" @@ -107,7 +104,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { return 0, err } } @@ -176,12 +173,12 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // We always reset the immutable flag (if present) to allow metadata // changes and to allow directory modification. The flag will be // re-applied based on the contents of hdr either at the end for - // directories or in createTarFile otherwise. + // directories or in extractTarFileEntry otherwise. if fi, err := os.Lstat(path); err == nil { if err := resetImmutable(path, &fi); err != nil { return 0, err } - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { if err := os.RemoveAll(path); err != nil { return 0, err } @@ -212,7 +209,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, return 0, err } - if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 710167d2..0ebb1cb8 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -47,7 +47,7 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error // This should be used to prevent a potential attacker from manipulating `dest` // such that it would provide access to files outside of `dest` through things // like symlinks. 
Normally `ResolveSymlinksInScope` would handle this, however -// sanitizing symlinks in this manner is inherrently racey: +// sanitizing symlinks in this manner is inherently racey: // ref: CVE-2018-15664 func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error { return untarHandler(tarArchive, dest, options, true, root) @@ -69,9 +69,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions options = &archive.TarOptions{} options.InUserNS = unshare.IsRootless() } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMappings.RootPair() diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index ee215ce4..361e23d3 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -98,9 +98,6 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions options.InUserNS = true } } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } data, err := json.Marshal(options) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go deleted file mode 100644 index fa17c9bf..00000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf..00000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go index 09e75680..c2dbe19a 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go @@ -65,7 +65,7 @@ func (bf *bloomFilter) writeTo(writer io.Writer) error { if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil { return err } - if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil { + if err := binary.Write(writer, binary.LittleEndian, bf.k); err != nil { return err } if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 47742b05..0e49ddd8 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -16,7 +16,7 @@ import ( storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" "github.com/containers/storage/pkg/ioutils" "github.com/docker/go-units" jsoniter "github.com/json-iterator/go" @@ -710,7 +710,7 @@ func prepareCacheFile(manifest []byte, format 
graphdriver.DifferOutputFormat) ([ switch format { case graphdriver.DifferOutputFormatDir: case graphdriver.DifferOutputFormatFlat: - entries, err = makeEntriesFlat(entries) + entries, err = makeEntriesFlat(entries, nil) if err != nil { return nil, err } @@ -848,12 +848,12 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo return "", "", nil } -func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { +func (c *layersCache) findChunkInOtherLayers(chunk *minimal.FileMetadata) (string, string, int64, error) { return c.findDigestInternal(chunk.ChunkDigest) } -func unmarshalToc(manifest []byte) (*internal.TOC, error) { - var toc internal.TOC +func unmarshalToc(manifest []byte) (*minimal.TOC, error) { + var toc minimal.TOC iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) @@ -864,7 +864,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { case "entries": for iter.ReadArray() { - var m internal.FileMetadata + var m minimal.FileMetadata for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { switch strings.ToLower(field) { case "type": diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go index e828d479..564efc8b 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go @@ -4,18 +4,18 @@ import ( "io" "github.com/containers/storage/pkg/chunked/compressor" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" ) const ( - TypeReg = internal.TypeReg - TypeChunk = internal.TypeChunk - TypeLink = internal.TypeLink - TypeChar = internal.TypeChar - TypeBlock = internal.TypeBlock - TypeDir = internal.TypeDir - TypeFifo = internal.TypeFifo - TypeSymlink = internal.TypeSymlink + TypeReg = minimal.TypeReg + TypeChunk = minimal.TypeChunk + TypeLink = minimal.TypeLink + TypeChar = minimal.TypeChar + TypeBlock = minimal.TypeBlock + TypeDir = minimal.TypeDir + TypeFifo = minimal.TypeFifo + TypeSymlink = minimal.TypeSymlink ) // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 2dac4635..ddd7ff53 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -7,17 +7,25 @@ import ( "fmt" "io" "maps" + "os" + "slices" "strconv" "time" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/vbatts/tar-split/archive/tar" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" - expMaps "golang.org/x/exp/maps" + "golang.org/x/sys/unix" +) + +const ( + // maxTocSize is the maximum size of a blob that we will attempt to process. + // It is used to prevent DoS attacks from layers that embed a very large TOC file. + maxTocSize = (1 << 20) * 150 ) var typesToTar = map[string]byte{ @@ -38,31 +46,33 @@ func typeToTarType(t string) (byte, error) { return r, nil } +// readEstargzChunkedManifest reads the estargz manifest from the seekable stream blobStream. 
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert. func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) { // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md footerSize := int64(51) if blobSize <= footerSize { return nil, 0, errors.New("blob too small") } - chunk := ImageSourceChunk{ - Offset: uint64(blobSize - footerSize), - Length: uint64(footerSize), - } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + + footer := make([]byte, footerSize) + streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)}) if err != nil { + var badRequestErr ErrBadRequest + if errors.As(err, &badRequestErr) { + err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)} + } return nil, 0, err } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err - } - defer reader.Close() - footer := make([]byte, footerSize) - if _, err := io.ReadFull(reader, footer); err != nil { - return nil, 0, err + + for soe := range streamsOrErrors { + if soe.stream != nil { + _, err = io.ReadFull(soe.stream, footer) + _ = soe.stream.Close() + } + if soe.err != nil && err == nil { + err = soe.err + } } /* Read the ToC offset: @@ -79,50 +89,61 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, return nil, 0, fmt.Errorf("parse ToC offset: %w", err) } - size := int64(blobSize - footerSize - tocOffset) + size := blobSize - footerSize - tocOffset // set a reasonable limit - if size > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") + if size > maxTocSize { + // Not errFallbackCanConvert: we would still use too much memory. 
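Where the old code returned a flat "manifest too big" error, the new code distinguishes two fallback signals. A sketch of the convention, with names from this patch (the exact shapes of errFallbackCanConvert and newErrFallbackToOrdinaryLayerDownload are assumed from their uses here):

size := int64(200 << 20)        // placeholder value
cause := errors.New("HTTP 400") // placeholder cause

// Plain signal: stop partial pulling, fetch the whole layer instead.
// For oversized TOCs the code above deliberately stops here, because
// converting would use just as much memory.
plain := newErrFallbackToOrdinaryLayerDownload(
	fmt.Errorf("manifest too big to process in memory (%d bytes)", size))

// Wrapped signal: same fallback, but the layer may still be converted
// to zstd:chunked after the ordinary download.
convertible := errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(cause)}

var canConvert errFallbackCanConvert
fmt.Println(errors.As(plain, &canConvert))       // false
fmt.Println(errors.As(convertible, &canConvert)) // true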
+ return nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz manifest too big to process in memory (%d bytes)", size)) } - chunk = ImageSourceChunk{ - Offset: uint64(tocOffset), - Length: uint64(size), - } - parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, 0, err - } - - var tocReader io.ReadCloser - select { - case r := <-parts: - tocReader = r - case err := <-errs: - return nil, 0, err - } - defer tocReader.Close() - - r, err := pgzip.NewReader(tocReader) + streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)}) if err != nil { + var badRequestErr ErrBadRequest + if errors.As(err, &badRequestErr) { + err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)} + } return nil, 0, err } - defer r.Close() - - aTar := archivetar.NewReader(r) - header, err := aTar.Next() - if err != nil { - return nil, 0, err + var manifestUncompressed []byte + + for soe := range streamsOrErrors { + if soe.stream != nil { + err1 := func() error { + defer soe.stream.Close() + + r, err := pgzip.NewReader(soe.stream) + if err != nil { + return err + } + defer r.Close() + + aTar := archivetar.NewReader(r) + + header, err := aTar.Next() + if err != nil { + return err + } + // set a reasonable limit + if header.Size > maxTocSize { + return errors.New("manifest too big") + } + + manifestUncompressed = make([]byte, header.Size) + if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { + return err + } + return nil + }() + if err == nil { + err = err1 + } + } else if err == nil { + err = soe.err + } } - // set a reasonable limit - if header.Size > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") - } - - manifestUncompressed := make([]byte, header.Size) - if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { - return nil, 0, err + if manifestUncompressed == nil { + return nil, 0, errors.New("manifest not found") } manifestDigester := digest.Canonical.Digester() @@ -138,12 +159,39 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, return manifestUncompressed, tocOffset, nil } +func openTmpFile(tmpDir string) (*os.File, error) { + file, err := os.OpenFile(tmpDir, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC|unix.O_EXCL, 0o600) + if err == nil { + return file, nil + } + return openTmpFileNoTmpFile(tmpDir) +} + +// openTmpFileNoTmpFile is a fallback used by openTmpFile when the underlying file system does not +// support O_TMPFILE. +func openTmpFileNoTmpFile(tmpDir string) (*os.File, error) { + file, err := os.CreateTemp(tmpDir, ".tmpfile") + if err != nil { + return nil, err + } + // Unlink the file immediately so that only the open fd refers to it. + _ = os.Remove(file.Name()) + return file, nil +} + // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. -// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset). -func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) { - offsetMetadata := annotations[internal.ManifestInfoKey] +// tmpDir is a directory where the tar-split temporary file is written to. The file is opened with +// O_TMPFILE so that it is automatically removed when it is closed. +// Returns (manifest blob, parsed manifest, tar-split file or nil, manifest offset). +// The opened tar-split file’s position is unspecified. 
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert. +// The compressed parameter indicates whether the manifest and tar-split data are zstd-compressed +// (true) or stored uncompressed (false). Uncompressed data is used only for an optimization to convert +// a regular OCI layer to zstd:chunked when convert_images is set, and it is not used for distributed images. +func readZstdChunkedManifest(tmpDir string, blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string, compressed bool) (_ []byte, _ *minimal.TOC, _ *os.File, _ int64, retErr error) { + offsetMetadata := annotations[minimal.ManifestInfoKey] if offsetMetadata == "" { - return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey) + return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey) } var manifestChunk ImageSourceChunk var manifestLengthUncompressed, manifestType uint64 @@ -153,48 +201,59 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di // The tarSplit… values are valid if tarSplitChunk.Offset > 0 var tarSplitChunk ImageSourceChunk var tarSplitLengthUncompressed uint64 - if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found { + if tarSplitInfoKeyAnnotation, found := annotations[minimal.TarSplitInfoKey]; found { if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil { return nil, nil, nil, 0, err } } - if manifestType != internal.ManifestTypeCRFS { + if manifestType != minimal.ManifestTypeCRFS { return nil, nil, nil, 0, errors.New("invalid manifest type") } // set a reasonable limit - if manifestChunk.Length > (1<<20)*50 { - return nil, nil, nil, 0, errors.New("manifest too big") + if manifestChunk.Length > maxTocSize { + // Not errFallbackCanConvert: we would still use too much memory. + return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes compressed)", manifestChunk.Length)) } - if manifestLengthUncompressed > (1<<20)*50 { - return nil, nil, nil, 0, errors.New("manifest too big") + if manifestLengthUncompressed > maxTocSize { + // Not errFallbackCanConvert: we would still use too much memory. + return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes uncompressed)", manifestLengthUncompressed)) } chunks := []ImageSourceChunk{manifestChunk} if tarSplitChunk.Offset > 0 { chunks = append(chunks, tarSplitChunk) } - parts, errs, err := blobStream.GetBlobAt(chunks) + + streamsOrErrors, err := getBlobAt(blobStream, chunks...) 
if err != nil { + var badRequestErr ErrBadRequest + if errors.As(err, &badRequestErr) { + err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)} + } return nil, nil, nil, 0, err } + defer func() { + err := ensureAllBlobsDone(streamsOrErrors) + if retErr == nil { + retErr = err + } + }() + readBlob := func(len uint64) ([]byte, error) { - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, err + soe, ok := <-streamsOrErrors + if !ok { + return nil, errors.New("stream closed") + } + if soe.err != nil { + return nil, soe.err } + defer soe.stream.Close() blob := make([]byte, len) - if _, err := io.ReadFull(reader, blob); err != nil { - reader.Close() - return nil, err - } - if err := reader.Close(); err != nil { + if _, err := io.ReadFull(soe.stream, blob); err != nil { return nil, err } return blob, nil @@ -205,7 +264,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di return nil, nil, nil, 0, err } - decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String()) + decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String(), compressed) if err != nil { return nil, nil, nil, 0, fmt.Errorf("validating and decompressing TOC: %w", err) } @@ -214,17 +273,25 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err) } - var decodedTarSplit []byte = nil + var decodedTarSplit *os.File if toc.TarSplitDigest != "" { if tarSplitChunk.Offset <= 0 { - return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey) + return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey) } tarSplit, err := readBlob(tarSplitChunk.Length) if err != nil { return nil, nil, nil, 0, err } - decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String()) + decodedTarSplit, err = openTmpFile(tmpDir) if err != nil { + return nil, nil, nil, 0, err + } + defer func() { + if retErr != nil { + decodedTarSplit.Close() + } + }() + if err := decodeAndValidateBlobToStream(tarSplit, decodedTarSplit, toc.TarSplitDigest.String(), compressed); err != nil { return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err) } // We use the TOC for creating on-disk files, but the tar-split for creating metadata @@ -243,15 +310,15 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di return nil, nil, nil, 0, err } } - return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err + return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), nil } // ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries. 
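Because tar-split data now arrives as an *os.File rather than a []byte, re-reading it requires an explicit rewind. The iteration pattern used below, in isolation (asm and storage are the vbatts/tar-split packages this file already imports; tarSplit stands for the file returned by readZstdChunkedManifest):

if _, err := tarSplit.Seek(0, io.SeekStart); err != nil {
	return err
}
unpacker := storage.NewJSONUnpacker(tarSplit) // reads JSON-packed tar metadata
if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
	// each callback sees one reconstructed tar header
	return nil
}); err != nil {
	return err
}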
-func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { - pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries +func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit *os.File) error { + pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries for i := range toc.Entries { e := &toc.Entries[i] - if e.Type != internal.TypeChunk { + if e.Type != minimal.TypeChunk { if _, ok := pendingFiles[e.Name]; ok { return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name) } @@ -259,14 +326,18 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { } } - unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit)) + if _, err := tarSplit.Seek(0, io.SeekStart); err != nil { + return err + } + + unpacker := storage.NewJSONUnpacker(tarSplit) if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error { e, ok := pendingFiles[hdr.Name] if !ok { return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name) } delete(pendingFiles, hdr.Name) - expected, err := internal.NewFileMetadata(hdr) + expected, err := minimal.NewFileMetadata(hdr) if err != nil { return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err) } @@ -279,7 +350,7 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { return err } if len(pendingFiles) != 0 { - remaining := expMaps.Keys(pendingFiles) + remaining := slices.Collect(maps.Keys(pendingFiles)) if len(remaining) > 5 { remaining = remaining[:5] // Just to limit the size of the output. } @@ -289,10 +360,10 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { } // tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata -func tarSizeFromTarSplit(tarSplit []byte) (int64, error) { +func tarSizeFromTarSplit(tarSplit io.Reader) (int64, error) { var res int64 = 0 - unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit)) + unpacker := storage.NewJSONUnpacker(tarSplit) for { entry, err := unpacker.Next() if err != nil { @@ -347,8 +418,8 @@ func ensureTimePointersMatch(a, b *time.Time) error { // ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data // in the tar stream or matching contents) -func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error { - // Keep this in sync with internal.FileMetadata! +func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error { + // Keep this in sync with minimal.FileMetadata! 
if a.Type != b.Type { return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type) @@ -402,22 +473,33 @@ func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error { return nil } -func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { +func validateBlob(blob []byte, expectedCompressedChecksum string) error { d, err := digest.Parse(expectedCompressedChecksum) if err != nil { - return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err) + return fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err) } blobDigester := d.Algorithm().Digester() blobChecksum := blobDigester.Hash() if _, err := blobChecksum.Write(blob); err != nil { - return nil, err + return err } if blobDigester.Digest() != d { - return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest()) + return fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest()) + } + return nil +} + +func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string, compressed bool) ([]byte, error) { + if err := validateBlob(blob, expectedCompressedChecksum); err != nil { + return nil, err + } + + if !compressed { + return blob, nil } - decoder, err := zstd.NewReader(nil) //nolint:contextcheck + decoder, err := zstd.NewReader(nil) if err != nil { return nil, err } @@ -426,3 +508,23 @@ func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompr b := make([]byte, 0, lengthUncompressed) return decoder.DecodeAll(blob, b) } + +func decodeAndValidateBlobToStream(blob []byte, w *os.File, expectedCompressedChecksum string, compressed bool) error { + if err := validateBlob(blob, expectedCompressedChecksum); err != nil { + return err + } + + if !compressed { + _, err := w.Write(blob) + return err + } + + decoder, err := zstd.NewReader(bytes.NewReader(blob)) + if err != nil { + return err + } + defer decoder.Close() + + _, err = decoder.WriteTo(w) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index 65496974..2930723a 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -9,9 +9,8 @@ import ( "bytes" "io" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" "github.com/containers/storage/pkg/ioutils" - "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" "github.com/vbatts/tar-split/archive/tar" "github.com/vbatts/tar-split/tar/asm" @@ -142,10 +141,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { rc.IsLastChunkZeros = false if rc.pendingHole > 0 { - toCopy := int64(len(b)) - if rc.pendingHole < toCopy { - toCopy = rc.pendingHole - } + toCopy := min(rc.pendingHole, int64(len(b))) rc.pendingHole -= toCopy for i := int64(0); i < toCopy; i++ { b[i] = 0 @@ -163,7 +159,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { return false, 0, io.EOF } - for i := 0; i < len(b); i++ { + for i := range b { holeLen, n, err := rc.reader.readByte() if err != nil { if err == io.EOF { @@ -205,15 +201,15 @@ type tarSplitData struct { compressed *bytes.Buffer digester digest.Digester uncompressedCounter *ioutils.WriteCounter - zstd *zstd.Encoder + zstd 
minimal.ZstdWriter packer storage.Packer } -func newTarSplitData(level int) (*tarSplitData, error) { +func newTarSplitData(createZstdWriter minimal.CreateZstdWriterFunc) (*tarSplitData, error) { compressed := bytes.NewBuffer(nil) digester := digest.Canonical.Digester() - zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) + zstdWriter, err := createZstdWriter(io.MultiWriter(compressed, digester.Hash())) if err != nil { return nil, err } @@ -230,11 +226,11 @@ func newTarSplitData(level int) (*tarSplitData, error) { }, nil } -func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { +func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, createZstdWriter minimal.CreateZstdWriterFunc) error { // total written so far. Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) - tarSplitData, err := newTarSplitData(level) + tarSplitData, err := newTarSplitData(createZstdWriter) if err != nil { return err } @@ -254,7 +250,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r buf := make([]byte, 4096) - zstdWriter, err := internal.ZstdWriterWithLevel(dest, level) + zstdWriter, err := createZstdWriter(dest) if err != nil { return err } @@ -276,7 +272,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r return offset, nil } - var metadata []internal.FileMetadata + var metadata []minimal.FileMetadata for { hdr, err := tr.Next() if err != nil { @@ -341,9 +337,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r chunkSize := rcReader.WrittenOut - lastChunkOffset if chunkSize > 0 { - chunkType := internal.ChunkTypeData + chunkType := minimal.ChunkTypeData if rcReader.IsLastChunkZeros { - chunkType = internal.ChunkTypeZeros + chunkType = minimal.ChunkTypeZeros } chunks = append(chunks, chunk{ @@ -368,17 +364,17 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r } } - mainEntry, err := internal.NewFileMetadata(hdr) + mainEntry, err := minimal.NewFileMetadata(hdr) if err != nil { return err } mainEntry.Digest = checksum mainEntry.Offset = startOffset mainEntry.EndOffset = lastOffset - entries := []internal.FileMetadata{mainEntry} + entries := []minimal.FileMetadata{mainEntry} for i := 1; i < len(chunks); i++ { - entries = append(entries, internal.FileMetadata{ - Type: internal.TypeChunk, + entries = append(entries, minimal.FileMetadata{ + Type: minimal.TypeChunk, Name: hdr.Name, ChunkOffset: chunks[i].ChunkOffset, }) @@ -407,30 +403,23 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r return err } - if err := zstdWriter.Flush(); err != nil { - zstdWriter.Close() - return err - } if err := zstdWriter.Close(); err != nil { return err } zstdWriter = nil - if err := tarSplitData.zstd.Flush(); err != nil { - return err - } if err := tarSplitData.zstd.Close(); err != nil { return err } tarSplitData.zstd = nil - ts := internal.TarSplitData{ + ts := minimal.TarSplitData{ Data: tarSplitData.compressed.Bytes(), Digest: tarSplitData.digester.Digest(), UncompressedSize: tarSplitData.uncompressedCounter.Count, } - return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) + return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, createZstdWriter) } type zstdChunkedWriter struct { @@ -457,7 +446,7 @@ func 
(w zstdChunkedWriter) Write(p []byte) (int, error) { } } -// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is +// makeZstdChunkedWriter writes a zstd compressed tarball where each file is // compressed separately so it can be addressed separately. Idea based on CRFS: // https://github.com/google/crfs // The difference with CRFS is that the zstd compression is used instead of gzip. @@ -472,12 +461,12 @@ func (w zstdChunkedWriter) Write(p []byte) (int, error) { // [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] // [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] // MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. -func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) { +func makeZstdChunkedWriter(out io.Writer, metadata map[string]string, createZstdWriter minimal.CreateZstdWriterFunc) (io.WriteCloser, error) { ch := make(chan error, 1) r, w := io.Pipe() go func() { - ch <- writeZstdChunkedStream(out, metadata, r, level) + ch <- writeZstdChunkedStream(out, metadata, r, createZstdWriter) _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates. r.Close() close(ch) @@ -496,5 +485,40 @@ func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.Wri level = &l } - return zstdChunkedWriterWithLevel(r, metadata, *level) + createZstdWriter := func(dest io.Writer) (minimal.ZstdWriter, error) { + return minimal.ZstdWriterWithLevel(dest, *level) + } + + return makeZstdChunkedWriter(r, metadata, createZstdWriter) +} + +type noCompression struct { + dest io.Writer +} + +func (n *noCompression) Write(p []byte) (int, error) { + return n.dest.Write(p) +} + +func (n *noCompression) Close() error { + return nil +} + +func (n *noCompression) Flush() error { + return nil +} + +func (n *noCompression) Reset(dest io.Writer) { + n.dest = dest +} + +// NoCompression writes directly to the output file without any compression +// +// Such an output does not follow the zstd:chunked spec and cannot be generally consumed; this function +// only exists for internal purposes and should not be called from outside c/storage. 
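To make the new indirection concrete: a short sketch (not part of the vendored code) of how a caller inside this package could select between real zstd output and the pass-through writer defined above; compress and level are illustrative parameters:

func pickWriterFactory(compress bool, level int) minimal.CreateZstdWriterFunc {
	if compress {
		// Real zstd frames at the requested level.
		return func(dest io.Writer) (minimal.ZstdWriter, error) {
			return minimal.ZstdWriterWithLevel(dest, level)
		}
	}
	// Raw pass-through bytes, as used by NoCompression below.
	return func(dest io.Writer) (minimal.ZstdWriter, error) {
		return &noCompression{dest: dest}, nil
	}
}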
+func NoCompression(r io.Writer, metadata map[string]string) (io.WriteCloser, error) { + createZstdWriter := func(dest io.Writer) (minimal.ZstdWriter, error) { + return &noCompression{dest: dest}, nil + } + return makeZstdChunkedWriter(r, metadata, createZstdWriter) } diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index d98cee09..0bda7a0d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -9,10 +9,11 @@ import ( "io" "path/filepath" "reflect" - "strings" "time" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" + storagePath "github.com/containers/storage/pkg/chunked/internal/path" + "github.com/opencontainers/go-digest" "golang.org/x/sys/unix" ) @@ -42,7 +43,7 @@ func escaped(val []byte, escape int) string { } var result string - for _, c := range []byte(val) { + for _, c := range val { hexEscape := false var special string @@ -85,17 +86,17 @@ func escapedOptional(val []byte, escape int) string { func getStMode(mode uint32, typ string) (uint32, error) { switch typ { - case internal.TypeReg, internal.TypeLink: + case minimal.TypeReg, minimal.TypeLink: mode |= unix.S_IFREG - case internal.TypeChar: + case minimal.TypeChar: mode |= unix.S_IFCHR - case internal.TypeBlock: + case minimal.TypeBlock: mode |= unix.S_IFBLK - case internal.TypeDir: + case minimal.TypeDir: mode |= unix.S_IFDIR - case internal.TypeFifo: + case minimal.TypeFifo: mode |= unix.S_IFIFO - case internal.TypeSymlink: + case minimal.TypeSymlink: mode |= unix.S_IFLNK default: return 0, fmt.Errorf("unknown type %s", typ) @@ -103,24 +104,14 @@ func getStMode(mode uint32, typ string) (uint32, error) { return mode, nil } -func sanitizeName(name string) string { - path := filepath.Clean(name) - if path == "." 
{ - path = "/" - } else if path[0] != '/' { - path = "/" + path - } - return path -} - -func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { - path := sanitizeName(entry.Name) +func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *minimal.FileMetadata) error { + path := storagePath.CleanAbsPath(entry.Name) parent := filepath.Dir(path) if _, found := added[parent]; !found && path != "/" { - parentEntry := &internal.FileMetadata{ + parentEntry := &minimal.FileMetadata{ Name: parent, - Type: internal.TypeDir, + Type: minimal.TypeDir, Mode: 0o755, } if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil { @@ -143,7 +134,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ nlinks := links[entry.Name] + links[entry.Linkname] + 1 link := "" - if entry.Type == internal.TypeLink { + if entry.Type == minimal.TypeLink { link = "@" } @@ -169,16 +160,21 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ var payload string if entry.Linkname != "" { - if entry.Type == internal.TypeSymlink { + if entry.Type == minimal.TypeSymlink { payload = entry.Linkname } else { - payload = sanitizeName(entry.Linkname) + payload = storagePath.CleanAbsPath(entry.Linkname) } - } else { - if len(entry.Digest) > 10 { - d := strings.Replace(entry.Digest, "sha256:", "", 1) - payload = d[:2] + "/" + d[2:] + } else if entry.Digest != "" { + d, err := digest.Parse(entry.Digest) + if err != nil { + return fmt.Errorf("invalid digest %q for %q: %w", entry.Digest, entry.Name, err) + } + path, err := storagePath.RegularFilePathForValidatedDigest(d) + if err != nil { + return fmt.Errorf("determining physical file path for %q: %w", entry.Name, err) } + payload = path } if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil { @@ -218,8 +214,8 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ } // GenerateDump generates a dump of the TOC in the same format as `composefs-info dump` -func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) { - toc, ok := tocI.(*internal.TOC) +func GenerateDump(tocI any, verityDigests map[string]string) (io.Reader, error) { + toc, ok := tocI.(*minimal.TOC) if !ok { return nil, fmt.Errorf("invalid TOC type") } @@ -235,21 +231,21 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, }() links := make(map[string]int) - added := make(map[string]*internal.FileMetadata) + added := make(map[string]*minimal.FileMetadata) for _, e := range toc.Entries { if e.Linkname == "" { continue } - if e.Type == internal.TypeSymlink { + if e.Type == minimal.TypeSymlink { continue } links[e.Linkname] = links[e.Linkname] + 1 } if len(toc.Entries) == 0 { - root := &internal.FileMetadata{ + root := &minimal.FileMetadata{ Name: "/", - Type: internal.TypeDir, + Type: minimal.TypeDir, Mode: 0o755, } @@ -261,7 +257,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, } for _, e := range toc.Entries { - if e.Type == internal.TypeChunk { + if e.Type == minimal.TypeChunk { continue } if err := dumpNode(w, added, links, verityDigests, &e); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go 
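The dump.go changes above replace ad-hoc string slicing with two helpers from the new internal/path package (added later in this patch). Their expected behavior, sketched as examples rather than vendored code:

// CleanAbsPath anchors every input at "/" before cleaning:
//
//	CleanAbsPath("a/b/../c") == "/a/c"
//	CleanAbsPath(".")        == "/"
//	CleanAbsPath("//a/b/")   == "/a/b"
//
// RegularFilePathForValidatedDigest splits a sha256 digest's encoded value
// after two hex characters, so "sha256:abcdef…" maps to "ab/cdef…"; any other
// digest algorithm is rejected with an error.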
index e26e5e5c..82685e9c 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
@@ -15,7 +15,8 @@ import (
 driversCopy "github.com/containers/storage/drivers/copy"
 "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/chunked/internal/minimal"
+ storagePath "github.com/containers/storage/pkg/chunked/internal/path"
 securejoin "github.com/cyphar/filepath-securejoin"
 "github.com/vbatts/tar-split/archive/tar"
 "golang.org/x/sys/unix"
@@ -34,14 +35,14 @@ func procPathForFd(fd int) string {
 return fmt.Sprintf("/proc/self/fd/%d", fd)
 }
-// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
+// fileMetadata is a wrapper around minimal.FileMetadata with additional private fields that
 // are not part of the TOC document.
 // Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
 type fileMetadata struct {
- internal.FileMetadata
+ minimal.FileMetadata
 // chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
- chunks []*internal.FileMetadata
+ chunks []*minimal.FileMetadata
 // skipSetAttrs is set when the file attributes must not be
 // modified, e.g. it is a hard link from a different source,
@@ -49,10 +50,37 @@ type fileMetadata struct {
 skipSetAttrs bool
 }
+// splitPath takes a file path as input and returns two components: dir and base.
+// Unlike filepath.Split(), this function handles some edge cases.
+// If the path refers to a file in the root directory, the returned dir is "/".
+// The returned base value is never empty, never contains a slash, and is
+// never "..".
+func splitPath(path string) (string, string, error) {
+ path = storagePath.CleanAbsPath(path)
+ dir, base := filepath.Split(path)
+ if base == "" {
+ base = "."
+ }
+ // Remove trailing slashes from dir, but make sure that "/" is preserved.
+ dir = strings.TrimSuffix(dir, "/")
+ if dir == "" {
+ dir = "/"
+ }
+
+ if strings.Contains(base, "/") {
+ // This should never happen, but be safe as the base is passed to *at syscalls.
+ return "", "", fmt.Errorf("internal error: splitPath(%q) contains a slash", path)
+ }
+ return dir, base, nil
+}
+
 func doHardLink(dirfd, srcFd int, destFile string) error {
- destDir, destBase := filepath.Split(destFile)
+ destDir, destBase, err := splitPath(destFile)
+ if err != nil {
+ return err
+ }
 destDirFd := dirfd
- if destDir != "" && destDir != "." {
+ if destDir != "/" {
 f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
 if err != nil {
 return err
 }
@@ -72,7 +100,7 @@ func doHardLink(dirfd, srcFd int, destFile string) error {
 return nil
 }
- err := doLink()
+ err = doLink()
 // if the destination exists, unlink it first and try again
 if err != nil && os.IsExist(err) {
@@ -281,8 +309,11 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
 // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
 // last component as the path to openat().
 if hasNoFollow {
- dirName, baseName := filepath.Split(name)
- if dirName != "" && dirName != "."
{ + dirName, baseName, err := splitPath(name) + if err != nil { + return -1, err + } + if dirName != "/" { newRoot, err := securejoin.SecureJoin(root, dirName) if err != nil { return -1, err @@ -409,7 +440,8 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil if errors.Is(err, unix.ENOENT) { parent := filepath.Dir(name) - if parent != "" { + // do not create the root directory, it should always exist + if parent != name { pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode) if err2 != nil { return nil, err @@ -448,9 +480,12 @@ func appendHole(fd int, name string, size int64) error { } func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error { - parent, base := filepath.Split(name) + parent, base, err := splitPath(name) + if err != nil { + return err + } parentFd := dirfd - if parent != "" && parent != "." { + if parent != "/" { parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0) if err != nil { return err @@ -506,9 +541,12 @@ func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *arch } func safeSymlink(dirfd int, metadata *fileMetadata) error { - destDir, destBase := filepath.Split(metadata.Name) + destDir, destBase, err := splitPath(metadata.Name) + if err != nil { + return err + } destDirFd := dirfd - if destDir != "" && destDir != "." { + if destDir != "/" { f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0) if err != nil { return err @@ -542,9 +580,12 @@ func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { } func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { - dir, base := filepath.Split(path) + dir, base, err := splitPath(path) + if err != nil { + return err + } dirfd := d.Dirfd - if dir != "" && dir != "." { + if dir != "/" { dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0) if err != nil { return err diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go similarity index 94% rename from vendor/github.com/containers/storage/pkg/chunked/internal/compression.go rename to vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go index 2a24e4bf..4191524c 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go @@ -1,4 +1,4 @@ -package internal +package minimal // NOTE: This is used from github.com/containers/image by callers that // don't otherwise use containers/storage, so don't make this depend on any @@ -20,6 +20,15 @@ import ( "github.com/vbatts/tar-split/archive/tar" ) +// ZstdWriter is an interface that wraps standard io.WriteCloser and Reset() to reuse the compressor with a new writer. +type ZstdWriter interface { + io.WriteCloser + Reset(dest io.Writer) +} + +// CreateZstdWriterFunc is a function that creates a ZstdWriter for the provided destination writer. +type CreateZstdWriterFunc func(dest io.Writer) (ZstdWriter, error) + // TOC is short for Table of Contents and is used by the zstd:chunked // file format to effectively add an overall index into the contents // of a tarball; it also includes file metadata. 
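Returning to the filesystem_linux.go hunks above: splitPath composes CleanAbsPath with filepath.Split and then normalizes both components. Illustrative expectations (a sketch, not vendored code):

//	splitPath("/a/b/c") -> dir "/a/b", base "c"
//	splitPath("c")      -> dir "/",    base "c"   (inputs are made absolute first)
//	splitPath("/c/")    -> dir "/",    base "c"   (trailing slash removed by CleanAbsPath)
//	splitPath("/")      -> dir "/",    base "."   (base is never empty)
//	splitPath("../x")   -> dir "/",    base "x"   (".." cannot escape the root)

This is why the callers changed from checking dir != "" && dir != "." to dir != "/": the root directory is now the only case that needs no extra openOrCreateDirUnderRoot call.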
@@ -179,7 +188,7 @@ type TarSplitData struct {
 UncompressedSize int64
 }
-func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error {
+func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, createZstdWriter CreateZstdWriterFunc) error {
 // 8 is the size of the zstd skippable frame header + the frame size
 const zstdSkippableFrameHeader = 8
 manifestOffset := offset + zstdSkippableFrameHeader
@@ -198,7 +207,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 }
 var compressedBuffer bytes.Buffer
- zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level)
+ zstdWriter, err := createZstdWriter(&compressedBuffer)
 if err != nil {
 return err
 }
@@ -234,7 +243,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 Offset: manifestOffset,
 LengthCompressed: uint64(len(compressedManifest)),
 LengthUncompressed: uint64(len(manifest)),
- OffsetTarSplit: uint64(tarSplitOffset),
+ OffsetTarSplit: tarSplitOffset,
 LengthCompressedTarSplit: uint64(len(tarSplitData.Data)),
 LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
 }
@@ -244,7 +253,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 return appendZstdSkippableFrame(dest, manifestDataLE)
 }
-func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+func ZstdWriterWithLevel(dest io.Writer, level int) (ZstdWriter, error) {
 el := zstd.EncoderLevelFromZstd(level)
 return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
 }
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go b/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go
new file mode 100644
index 00000000..55ba7455
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go
@@ -0,0 +1,27 @@
+package path
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/opencontainers/go-digest"
+)
+
+// CleanAbsPath removes any ".." and "." from the path
+// and ensures it starts with a "/". If the path refers to the root
+// directory, it returns "/".
+func CleanAbsPath(path string) string {
+ return filepath.Clean("/" + path)
+}
+
+// RegularFilePathForValidatedDigest returns the path used in the composefs backing store for a
+// regular file with the provided content digest.
+//
+// The caller MUST ensure d is a valid digest (in particular, that it contains no path separators or ..
entries) +func RegularFilePathForValidatedDigest(d digest.Digest) (string, error) { + if algo := d.Algorithm(); algo != digest.SHA256 { + return "", fmt.Errorf("unexpected digest algorithm %q", algo) + } + e := d.Encoded() + return e[0:2] + "/" + e[2:], nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 8ecbfb98..f23a96b7 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -22,17 +22,21 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" + path "github.com/containers/storage/pkg/chunked/internal/path" "github.com/containers/storage/pkg/chunked/toc" "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" + securejoin "github.com/cyphar/filepath-securejoin" jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + tsStorage "github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/unix" ) @@ -57,49 +61,57 @@ const ( type compressedFileType int type chunkedDiffer struct { + // Initial parameters, used throughout and never modified + // ========== + pullOptions pullOptions stream ImageSourceSeekable - manifest []byte - toc *internal.TOC // The parsed contents of manifest, or nil if not yet available - tarSplit []byte - layersCache *layersCache - tocOffset int64 - fileType compressedFileType - - copyBuffer []byte - - gzipReader *pgzip.Reader - zstdReader *zstd.Decoder - rawReader io.Reader - - // tocDigest is the digest of the TOC document when the layer - // is partially pulled. - tocDigest digest.Digest + // blobDigest is the digest of the whole compressed layer. It is used if + // convertToZstdChunked to validate a layer when it is converted since there + // is no TOC referenced by the manifest. + blobDigest digest.Digest + blobSize int64 + // Input format + // ========== + fileType compressedFileType // convertedToZstdChunked is set to true if the layer needs to // be converted to the zstd:chunked format before it can be // handled. convertToZstdChunked bool + // Chunked metadata + // This is usually set in NewDiffer, but if convertToZstdChunked, it is only computed in chunkedDiffer.ApplyDiff + // ========== + // tocDigest is the digest of the TOC document when the layer + // is partially pulled, or "" if not relevant to consumers. + tocDigest digest.Digest + tocOffset int64 + manifest []byte + toc *minimal.TOC // The parsed contents of manifest, or nil if not yet available + tarSplit *os.File + uncompressedTarSize int64 // -1 if unknown // skipValidation is set to true if the individual files in // the layer are trusted and should not be validated. skipValidation bool - // blobDigest is the digest of the whole compressed layer. It is used if - // convertToZstdChunked to validate a layer when it is converted since there - // is no TOC referenced by the manifest. 
- blobDigest digest.Digest - - blobSize int64 - uncompressedTarSize int64 // -1 if unknown - - pullOptions map[string]string - - useFsVerity graphdriver.DifferFsVerity + // Long-term caches + // This is set in NewDiffer, when the caller must not hold any storage locks, and later consumed in .ApplyDiff() + // ========== + layersCache *layersCache + copyBuffer []byte + fsVerityMutex sync.Mutex // protects fsVerityDigests fsVerityDigests map[string]string - fsVerityMutex sync.Mutex + + // Private state of .ApplyDiff + // ========== + gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader + useFsVerity graphdriver.DifferFsVerity + used bool // the differ object was already used and cannot be used again for .ApplyDiff } -var xattrsToIgnore = map[string]interface{}{ +var xattrsToIgnore = map[string]any{ "security.selinux": true, } @@ -108,22 +120,57 @@ type chunkedLayerData struct { Format graphdriver.DifferOutputFormat `json:"format"` } +// pullOptions contains parsed data from storage.Store.PullOptions. +// TO DO: ideally this should be parsed along with the rest of the config file into StoreOptions directly +// (and then storage.Store.PullOptions would need to be somehow simulated). +type pullOptions struct { + enablePartialImages bool // enable_partial_images + convertImages bool // convert_images + useHardLinks bool // use_hard_links + insecureAllowUnpredictableImageContents bool // insecure_allow_unpredictable_image_contents + ostreeRepos []string // ostree_repos +} + +func parsePullOptions(store storage.Store) pullOptions { + options := store.PullOptions() + + res := pullOptions{} + for _, e := range []struct { + dest *bool + name string + defaultValue bool + }{ + {&res.enablePartialImages, "enable_partial_images", false}, + {&res.convertImages, "convert_images", false}, + {&res.useHardLinks, "use_hard_links", false}, + {&res.insecureAllowUnpredictableImageContents, "insecure_allow_unpredictable_image_contents", false}, + } { + if value, ok := options[e.name]; ok { + *e.dest = strings.ToLower(value) == "true" + } else { + *e.dest = e.defaultValue + } + } + res.ostreeRepos = strings.Split(options["ostree_repos"], ":") + + return res +} + func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) { diff, err := archive.DecompressStream(payload) if err != nil { return 0, nil, "", nil, err } - fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) + defer diff.Close() + + f, err := openTmpFile(destDirectory) if err != nil { - return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err} + return 0, nil, "", nil, err } - f := os.NewFile(uintptr(fd), destDirectory) - newAnnotations := make(map[string]string) - level := 1 - chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level) + chunked, err := compressor.NoCompression(f, newAnnotations) if err != nil { f.Close() return 0, nil, "", nil, err @@ -143,128 +190,179 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o return copied, newSeekableFile(f), convertedOutputDigester.Digest(), newAnnotations, nil } -// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
-// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
+func (c *chunkedDiffer) Close() error {
+ if c.tarSplit != nil {
+ err := c.tarSplit.Close()
+ c.tarSplit = nil
+ return err
+ }
+ return nil
+}
+
+// NewDiffer returns a differ that can be used with [Store.PrepareStagedLayer].
+// If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can
 // retry the operation with a different method.
-func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
- pullOptions := store.PullOptions()
+// The caller must call Close() on the returned Differ.
+func NewDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+ pullOptions := parsePullOptions(store)
- if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) {
- // If convertImages is set, the two options disagree whether fallback is permissible.
+ if !pullOptions.enablePartialImages {
+ // If pullOptions.convertImages is set, the two options disagree whether fallback is permissible.
 // Right now, we enable it, but that’s not a promise; rather, such a configuration should ideally be rejected.
 return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
 }
- // convertImages also serves as a “must not fallback to non-partial pull” option (?!)
- convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
+ // pullOptions.convertImages also serves as a “must not fallback to non-partial pull” option (?!)
 graphDriver, err := store.GraphDriver()
 if err != nil {
 return nil, err
 }
 if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
- if convertImages {
+ if pullOptions.convertImages {
 return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
 }
 return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
 }
- differ, canFallback, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
+ differ, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
 if err != nil {
- if !canFallback {
+ var fallbackErr ErrFallbackToOrdinaryLayerDownload
+ if !errors.As(err, &fallbackErr) {
 return nil, err
 }
 // If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
- if convertImages {
- logrus.Debugf("Created differ to convert blob %q", blobDigest)
- return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
+ if !pullOptions.convertImages {
+ return nil, err
+ }
+ var canConvertErr errFallbackCanConvert
+ if !errors.As(err, &canConvertErr) {
+ // We are supposed to use makeConvertFromRawDiffer, but that would not work.
+ // Fail, and make sure the error does _not_ match ErrFallbackToOrdinaryLayerDownload: use only the error text,
+ // discard all type information.
+ return nil, fmt.Errorf("neither a partial pull nor convert_images is possible: %s", err.Error()) } - return nil, newErrFallbackToOrdinaryLayerDownload(err) + logrus.Debugf("Created differ to convert blob %q", blobDigest) + return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions) } return differ, nil } -// getProperDiffer is an implementation detail of GetDiffer. +// errFallbackCanConvert is an an error type _accompanying_ ErrFallbackToOrdinaryLayerDownload +// within getProperDiffer, to mark that using makeConvertFromRawDiffer makes sense. +// This is used to distinguish between cases where the environment does not support partial pulls +// (e.g. a registry does not support range requests) and convert_images is still possible, +// from cases where the image content is unacceptable for partial pulls (e.g. exceeds memory limits) +// and convert_images would not help. +type errFallbackCanConvert struct { + err error +} + +func (e errFallbackCanConvert) Error() string { + return e.err.Error() +} + +func (e errFallbackCanConvert) Unwrap() error { + return e.err +} + +// getProperDiffer is an implementation detail of NewDiffer. // It returns a “proper” differ (not a convert_images one) if possible. -// On error, the second parameter is true if a fallback to an alternative (either the makeConverToRaw differ, or a non-partial pull) -// is permissible. -func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) { - zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] +// May return an error matching ErrFallbackToOrdinaryLayerDownload if a fallback to an alternative +// (either makeConvertFromRawDiffer, or a non-partial pull) is permissible. +func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (graphdriver.Differ, error) { + zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[minimal.ManifestChecksumKey] estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] switch { case hasZstdChunkedTOC && hasEstargzTOC: - return nil, false, errors.New("both zstd:chunked and eStargz TOC found") + return nil, errors.New("both zstd:chunked and eStargz TOC found") case hasZstdChunkedTOC: zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString) if err != nil { - return nil, false, err + return nil, err } differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions) if err != nil { logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err) - // If the error is a bad request to the server, then signal to the caller that it can try a different method. 
- var badRequestErr ErrBadRequest - return nil, errors.As(err, &badRequestErr), err + return nil, err } logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest) - return differ, false, nil + return differ, nil case hasEstargzTOC: estargzTOCDigest, err := digest.Parse(estargzTOCDigestString) if err != nil { - return nil, false, err + return nil, err } differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions) if err != nil { logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err) - // If the error is a bad request to the server, then signal to the caller that it can try a different method. - var badRequestErr ErrBadRequest - return nil, errors.As(err, &badRequestErr), err + return nil, err } logrus.Debugf("Created eStargz differ for blob %q", blobDigest) - return differ, false, nil + return differ, nil default: // no TOC - convertImages := parseBooleanPullOption(pullOptions, "convert_images", false) - if !convertImages { - return nil, true, errors.New("no TOC found and convert_images is not configured") + message := "no TOC found" + if !pullOptions.convertImages { + message = "no TOC found and convert_images is not configured" + } + return nil, errFallbackCanConvert{ + newErrFallbackToOrdinaryLayerDownload(errors.New(message)), } - return nil, true, errors.New("no TOC found") } } -func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) { +func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) { layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ - fsVerityDigests: make(map[string]string), - blobDigest: blobDigest, - blobSize: blobSize, - uncompressedTarSize: -1, // Will be computed later + pullOptions: pullOptions, + stream: iss, + blobDigest: blobDigest, + blobSize: blobSize, + convertToZstdChunked: true, - copyBuffer: makeCopyBuffer(), - layersCache: layersCache, - pullOptions: pullOptions, - stream: iss, + + uncompressedTarSize: -1, // Will be computed later + + layersCache: layersCache, + copyBuffer: makeCopyBuffer(), + fsVerityDigests: make(map[string]string), }, nil } -func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) { - manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations) - if err != nil { +// makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer. +// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert. 
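To make the error contract concrete, a fragment (not vendored code) showing how a caller such as NewDiffer tells the two layers apart; because errFallbackCanConvert implements Unwrap, errors.As sees through it to the inner ErrFallbackToOrdinaryLayerDownload:

var fallback ErrFallbackToOrdinaryLayerDownload
if errors.As(err, &fallback) {
	// A non-partial (ordinary) pull is permissible.
	var canConvert errFallbackCanConvert
	if errors.As(err, &canConvert) {
		// ...and converting the layer via makeConvertFromRawDiffer would also work.
	}
}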
+func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (_ *chunkedDiffer, retErr error) { + manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(store.RunRoot(), iss, tocDigest, annotations, true) + if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } + defer func() { + if tarSplit != nil && retErr != nil { + tarSplit.Close() + } + }() + var uncompressedTarSize int64 = -1 if tarSplit != nil { + if _, err := tarSplit.Seek(0, io.SeekStart); err != nil { + return nil, err + } uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit) if err != nil { return nil, fmt.Errorf("computing size from tar-split: %w", err) } + } else if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest. + return nil, errFallbackCanConvert{ + newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked layers without tar-split data don't support partial pulls with guaranteed consistency with non-partial pulls")), + } } layersCache, err := getLayersCache(store) @@ -273,25 +371,36 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest } return &chunkedDiffer{ - fsVerityDigests: make(map[string]string), - blobSize: blobSize, - uncompressedTarSize: uncompressedTarSize, + pullOptions: pullOptions, + stream: iss, + blobSize: blobSize, + + fileType: fileTypeZstdChunked, + tocDigest: tocDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeZstdChunked, - layersCache: layersCache, + tocOffset: tocOffset, manifest: manifest, toc: toc, - pullOptions: pullOptions, - stream: iss, tarSplit: tarSplit, - tocOffset: tocOffset, + uncompressedTarSize: uncompressedTarSize, + + layersCache: layersCache, + copyBuffer: makeCopyBuffer(), + fsVerityDigests: make(map[string]string), }, nil } -func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) { +// makeEstargzChunkedDiffer sets up a chunkedDiffer for an estargz layer. +// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert. +func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) { + if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest. 
+ return nil, errFallbackCanConvert{ + newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz layers don't support partial pulls with guaranteed consistency with non-partial pulls")), + } + } + manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest) - if err != nil { + if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } layersCache, err := getLayersCache(store) @@ -300,17 +409,20 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig } return &chunkedDiffer{ - fsVerityDigests: make(map[string]string), - blobSize: blobSize, - uncompressedTarSize: -1, // We would have to read and decompress the whole layer + pullOptions: pullOptions, + stream: iss, + blobSize: blobSize, + + fileType: fileTypeEstargz, + tocDigest: tocDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeEstargz, - layersCache: layersCache, - manifest: manifest, - pullOptions: pullOptions, - stream: iss, tocOffset: tocOffset, + manifest: manifest, + uncompressedTarSize: -1, // We would have to read and decompress the whole layer + + layersCache: layersCache, + copyBuffer: makeCopyBuffer(), + fsVerityDigests: make(map[string]string), }, nil } @@ -391,7 +503,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool { } // fill only the attributes used by canDedupMetadataWithHardLink. otherFile := fileMetadata{ - FileMetadata: internal.FileMetadata{ + FileMetadata: minimal.FileMetadata{ UID: int(st.Uid), GID: int(st.Gid), Mode: int64(st.Mode), @@ -546,27 +658,24 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) { return nil, err } - if _, err := srcFile.Seek(o.Offset, 0); err != nil { + if _, err := srcFile.Seek(o.Offset, io.SeekStart); err != nil { srcFile.Close() return nil, err } return srcFile, nil } -func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, mf *missingFileChunk) (compressedFileType, error) { switch { case partCompression == fileTypeHole: // The entire part is a hole. Do not need to read from a file. - c.rawReader = nil return fileTypeHole, nil case mf.Hole: // Only the missing chunk in the requested part refers to a hole. // The received data must be discarded. 
- limitReader := io.LimitReader(from, mf.CompressedSize) - _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer) + _, err := io.CopyBuffer(io.Discard, c.rawReader, c.copyBuffer) return fileTypeHole, err case partCompression == fileTypeZstdChunked: - c.rawReader = io.LimitReader(from, mf.CompressedSize) if c.zstdReader == nil { r, err := zstd.NewReader(c.rawReader) if err != nil { @@ -579,7 +688,6 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed } } case partCompression == fileTypeEstargz: - c.rawReader = io.LimitReader(from, mf.CompressedSize) if c.gzipReader == nil { r, err := pgzip.NewReader(c.rawReader) if err != nil { @@ -592,7 +700,7 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed } } case partCompression == fileTypeNoCompression: - c.rawReader = io.LimitReader(from, mf.UncompressedSize) + return fileTypeNoCompression, nil default: return partCompression, fmt.Errorf("unknown file type %q", c.fileType) } @@ -601,18 +709,12 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed // hashHole writes SIZE zeros to the specified hasher func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { - count := int64(len(copyBuffer)) - if size < count { - count = size - } + count := min(size, int64(len(copyBuffer))) for i := int64(0); i < count; i++ { copyBuffer[i] = 0 } for size > 0 { - count = int64(len(copyBuffer)) - if size < count { - count = size - } + count = min(size, int64(len(copyBuffer))) if _, err := h.Write(copyBuffer[:count]); err != nil { return err } @@ -735,7 +837,12 @@ func (d *destinationFile) Close() (Err error) { } } - return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) + mode := os.FileMode(d.metadata.Mode) + if d.options.ForceMask != nil { + mode = *d.options.ForceMask + } + + return setFileAttrs(d.dirfd, d.file, mode, d.metadata, d.options, false) } func closeDestinationFiles(files chan *destinationFile, errors chan error) { @@ -793,6 +900,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan for _, missingPart := range missingParts { var part io.ReadCloser partCompression := c.fileType + readingFromLocalFile := false switch { case missingPart.Hole: partCompression = fileTypeHole @@ -803,6 +911,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan return err } partCompression = fileTypeNoCompression + readingFromLocalFile = true case missingPart.SourceChunk != nil: select { case p := <-streams: @@ -836,7 +945,18 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan goto exit } - compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf) + c.rawReader = nil + if part != nil { + limit := mf.CompressedSize + // If we are reading from a source file, use the uncompressed size to limit the reader, because + // the compressed size refers to the original layer stream. 
+ if readingFromLocalFile { + limit = mf.UncompressedSize + } + c.rawReader = io.LimitReader(part, limit) + } + + compression, err := c.prepareCompressedStreamToFile(partCompression, &mf) if err != nil { Err = err goto exit @@ -915,7 +1035,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { !missingParts[prevIndex].Hole && !missingParts[i].Hole && len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { - missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + missingParts[prevIndex].SourceChunk.Length += gap + missingParts[i].SourceChunk.Length missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize } else { @@ -973,7 +1093,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { } else { gap := getGap(missingParts, i) prev := &newMissingParts[len(newMissingParts)-1] - prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.SourceChunk.Length += gap + missingParts[i].SourceChunk.Length prev.Hole = false prev.OriginFile = nil if gap > 0 { @@ -1038,13 +1158,6 @@ type hardLinkToCreate struct { metadata *fileMetadata } -func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool { - if value, ok := pullOptions[name]; ok { - return strings.ToLower(value) == "true" - } - return def -} - type findAndCopyFileOptions struct { useHardLinks bool ostreeRepos []string @@ -1111,10 +1224,13 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions return false, nil } -func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) { +// makeEntriesFlat collects regular-file entries from mergedEntries, and produces a new list +// where each file content is only represented once, and uses composefs.RegularFilePathForValidatedDigest for its name. +// If flatPathNameMap is not nil, this function writes to it a mapping from filepath.Clean(originalName) to the composefs name. 
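An illustrative view (sample values only, not vendored code) of what makeEntriesFlat records in flatPathNameMap when the flat/composefs output format is requested:

//	flatPathNameMap after processing two regular files with distinct contents
//	(digests invented for the example):
//
//	"usr/bin/vi" -> "f3/ab12…"
//	"etc/passwd" -> "9c/77de…"
//
// Entries whose contents share a digest map to the same flat path, and that
// file is emitted only once.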
+func makeEntriesFlat(mergedEntries []fileMetadata, flatPathNameMap map[string]string) ([]fileMetadata, error) { var new []fileMetadata - hashes := make(map[string]string) + knownFlatPaths := make(map[string]struct{}) for i := range mergedEntries { if mergedEntries[i].Type != TypeReg { continue @@ -1124,16 +1240,22 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) { } digest, err := digest.Parse(mergedEntries[i].Digest) if err != nil { - return nil, err + return nil, fmt.Errorf("invalid digest %q for %q: %w", mergedEntries[i].Digest, mergedEntries[i].Name, err) + } + path, err := path.RegularFilePathForValidatedDigest(digest) + if err != nil { + return nil, fmt.Errorf("determining physical file path for %q: %w", mergedEntries[i].Name, err) + } + if flatPathNameMap != nil { + flatPathNameMap[filepath.Clean(mergedEntries[i].Name)] = path } - d := digest.Encoded() - if hashes[d] != "" { + if _, known := knownFlatPaths[path]; known { continue } - hashes[d] = d + knownFlatPaths[path] = struct{}{} - mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:]) + mergedEntries[i].Name = path mergedEntries[i].skipSetAttrs = true new = append(new, mergedEntries[i]) @@ -1141,45 +1263,146 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) { return new, nil } -func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { - var payload io.ReadCloser - var streams chan io.ReadCloser - var errs chan error - var err error +type streamOrErr struct { + stream io.ReadCloser + err error +} - chunksToRequest := []ImageSourceChunk{ - { - Offset: 0, - Length: uint64(c.blobSize), - }, +// ensureAllBlobsDone ensures that all blobs are closed and returns the first error encountered. +func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) { + for soe := range streamsOrErrors { + if soe.stream != nil { + _ = soe.stream.Close() + } else if retErr == nil { + retErr = soe.err + } } + return +} - streams, errs, err = c.stream.GetBlobAt(chunksToRequest) - if err != nil { - return "", err +// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends +// either a stream or an error to the stream channel. The streams channel is closed when +// there are no more streams and errors to read. +// It ensures that no more than maxStreams streams are returned, and that every item from the +// streams and errs channels is consumed. 
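The hazard this converter sidesteps, sketched (not vendored code): with two result channels, a select keeps "succeeding" on whichever channel closes first, yielding zero values:

//	select {
//	case p := <-streams: // once streams is closed, receives nil immediately, forever
//		_ = p
//	case err := <-errs: // likewise a nil error once errs is closed
//		_ = err
//	}
//
// Funneling both sources into a single channel of streamOrErr values, closed
// exactly once, lets consumers simply range over it with no such ambiguity.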
+func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) { + tooManyStreams := false + streamsSoFar := 0 + + err := errors.New("unexpected error in getBlobAtGoroutine") + + defer func() { + if err != nil { + stream <- streamOrErr{err: err} + } + close(stream) + }() + +loop: + for { + select { + case p, ok := <-streams: + if !ok { + streams = nil + break loop + } + if streamsSoFar >= maxStreams { + tooManyStreams = true + _ = p.Close() + continue + } + streamsSoFar++ + stream <- streamOrErr{stream: p} + case err, ok := <-errs: + if !ok { + errs = nil + break loop + } + stream <- streamOrErr{err: err} + } } - select { - case p := <-streams: - payload = p - case err := <-errs: - return "", err + if streams != nil { + for p := range streams { + if streamsSoFar >= maxStreams { + tooManyStreams = true + _ = p.Close() + continue + } + streamsSoFar++ + stream <- streamOrErr{stream: p} + } } - if payload == nil { - return "", errors.New("invalid stream returned") + if errs != nil { + for err := range errs { + stream <- streamOrErr{err: err} + } + } + if tooManyStreams { + stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)} } - defer payload.Close() + err = nil +} - originalRawDigester := digest.Canonical.Digester() +// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt. +// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel +// always succeeds in select. +// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`. +func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) { + streams, errs, err := is.GetBlobAt(chunksToRequest) + if err != nil { + return nil, err + } + stream := make(chan streamOrErr) + go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest)) + return stream, nil +} - r := io.TeeReader(payload, originalRawDigester.Hash()) +func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { + streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)}) + if err != nil { + return "", err + } - // copy the entire tarball and compute its digest - _, err = io.CopyBuffer(destination, r, c.copyBuffer) + originalRawDigester := digest.Canonical.Digester() + for soe := range streamsOrErrors { + if soe.stream != nil { + r := io.TeeReader(soe.stream, originalRawDigester.Hash()) + // copy the entire tarball and compute its digest + _, err = io.CopyBuffer(destination, r, c.copyBuffer) + _ = soe.stream.Close() + } + if soe.err != nil && err == nil { + err = soe.err + } + } return originalRawDigester.Digest(), err } +func typeToOsMode(typ string) (os.FileMode, error) { + switch typ { + case TypeReg, TypeLink: + return 0, nil + case TypeSymlink: + return os.ModeSymlink, nil + case TypeDir: + return os.ModeDir, nil + case TypeChar: + return os.ModeDevice | os.ModeCharDevice, nil + case TypeBlock: + return os.ModeDevice, nil + case TypeFifo: + return os.ModeNamedPipe, nil + } + return 0, fmt.Errorf("unknown file type %q", typ) +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) { + if c.used { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: chunked differ already 
used") + } + c.used = true + defer c.layersCache.release() defer func() { if c.zstdReader != nil { @@ -1225,7 +1448,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err != nil { return graphdriver.DriverWithDifferOutput{}, err } + c.uncompressedTarSize = tarSize + // fileSource is a O_TMPFILE file descriptor, so we // need to keep it open until the entire file is processed. defer fileSource.Close() @@ -1241,7 +1466,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if tocDigest == nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest") } - manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations) + manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(dest, fileSource, *tocDigest, annotations, false) if err != nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -1250,7 +1475,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff stream = fileSource // fill the chunkedDiffer with the data we just read. - c.fileType = fileTypeZstdChunked + c.fileType = fileTypeNoCompression c.manifest = manifest c.toc = toc c.tarSplit = tarSplit @@ -1289,7 +1514,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff bigDataKey: c.manifest, chunkedLayerDataKey: lcdBigData, }, - Artifacts: map[string]interface{}{ + Artifacts: map[string]any{ tocKey: toc, }, TOCDigest: c.tocDigest, @@ -1298,13 +1523,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Size: c.uncompressedTarSize, } - // When the hard links deduplication is used, file attributes are ignored because setting them - // modifies the source file as well. - useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false) - - // List of OSTree repositories to use for deduplication - ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":") - whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) var missingParts []missingPart @@ -1325,7 +1543,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err == nil { value := idtools.Stat{ IDs: idtools.IDPair{UID: int(uid), GID: int(gid)}, - Mode: os.FileMode(mode), + Mode: os.ModeDir | os.FileMode(mode), } if err := idtools.SetContainersOverrideXattr(dest, value); err != nil { return output, err @@ -1337,16 +1555,20 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err != nil { return output, &fs.PathError{Op: "open", Path: dest, Err: err} } - defer unix.Close(dirfd) + dirFile := os.NewFile(uintptr(dirfd), dest) + defer dirFile.Close() + var flatPathNameMap map[string]string // = nil if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat { - mergedEntries, err = makeEntriesFlat(mergedEntries) + flatPathNameMap = map[string]string{} + mergedEntries, err = makeEntriesFlat(mergedEntries, flatPathNameMap) if err != nil { return output, err } createdDirs := make(map[string]struct{}) for _, e := range mergedEntries { - d := e.Name[0:2] + // This hard-codes an assumption that RegularFilePathForValidatedDigest creates paths with exactly one directory component. 
+ d := filepath.Dir(e.Name) if _, found := createdDirs[d]; !found { if err := unix.Mkdirat(dirfd, d, 0o755); err != nil { return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err} @@ -1363,8 +1585,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff missingPartsSize, totalChunksSize := int64(0), int64(0) copyOptions := findAndCopyFileOptions{ - useHardLinks: useHardLinks, - ostreeRepos: ostreeRepos, + // When the hard links deduplication is used, file attributes are ignored because setting them + // modifies the source file as well. + useHardLinks: c.pullOptions.useHardLinks, + ostreeRepos: c.pullOptions.ostreeRepos, // List of OSTree repositories to use for deduplication options: options, } @@ -1408,13 +1632,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff filesToWaitFor := 0 for i := range mergedEntries { r := &mergedEntries[i] - if options.ForceMask != nil { - value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode)) - if r.Xattrs == nil { - r.Xattrs = make(map[string]string) - } - r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) - } mode := os.FileMode(r.Mode) @@ -1423,10 +1640,37 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return output, err } - r.Name = filepath.Clean(r.Name) + size := r.Size + + // update also the implementation of ForceMask in pkg/archive + if options.ForceMask != nil { + mode = *options.ForceMask + + // special files will be stored as regular files + if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink { + t = tar.TypeReg + size = 0 + } + + // if the entry will be stored as a directory or a regular file, store in a xattr the original + // owner and mode. + if t == tar.TypeDir || t == tar.TypeReg { + typeMode, err := typeToOsMode(r.Type) + if err != nil { + return output, err + } + value := idtools.FormatContainersOverrideXattrDevice(r.UID, r.GID, typeMode|fs.FileMode(r.Mode), int(r.Devmajor), int(r.Devminor)) + if r.Xattrs == nil { + r.Xattrs = make(map[string]string) + } + r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) + } + } + + r.Name = path.CleanAbsPath(r.Name) // do not modify the value of symlinks if r.Linkname != "" && t != tar.TypeSymlink { - r.Linkname = filepath.Clean(r.Linkname) + r.Linkname = path.CleanAbsPath(r.Linkname) } if whiteoutConverter != nil { @@ -1434,8 +1678,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Typeflag: t, Name: r.Name, Linkname: r.Linkname, - Size: r.Size, - Mode: r.Mode, + Size: size, + Mode: int64(mode), Uid: r.UID, Gid: r.GID, } @@ -1454,7 +1698,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff switch t { case tar.TypeReg: // Create directly empty files. - if r.Size == 0 { + if size == 0 { // Used to have a scope for cleanup. createEmptyFile := func() error { file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0) @@ -1474,7 +1718,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } case tar.TypeDir: - if r.Name == "" || r.Name == "." 
{ + if r.Name == "/" { output.RootDirMode = &mode } if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
@@ -1509,7 +1753,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return output, fmt.Errorf("invalid type %q", t) } - totalChunksSize += r.Size + totalChunksSize += size if t == tar.TypeReg { index := i
@@ -1548,7 +1792,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // the file is missing, attempt to find individual chunks. for _, chunk := range r.chunks { - compressedSize := int64(chunk.EndOffset - chunk.Offset) + compressedSize := chunk.EndOffset - chunk.Offset size := remainingSize if chunk.ChunkSize > 0 { size = chunk.ChunkSize
@@ -1572,7 +1816,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } switch chunk.ChunkType { - case internal.ChunkTypeData: + case minimal.ChunkTypeData: root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) if err != nil { return output, err }
@@ -1585,7 +1829,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Offset: offset, } } - case internal.ChunkTypeZeros: + case minimal.ChunkTypeZeros: missingPartsSize -= size mp.Hole = true // Mark all chunks belonging to the missing part as holes
@@ -1609,16 +1853,55 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } } + // To ensure that consumers of the layer who decompress and read the full tar stream, + // and consumers who consume the data via the TOC, both see exactly the same data and metadata, + // compute the UncompressedDigest. + // c/image will then ensure that this value matches the value in the image config’s RootFS.DiffID, i.e. the image must commit + // to one UncompressedDigest value for each layer, and that will avoid the ambiguity (in consumers who validate layers against DiffID). + // + // c/image also uses the UncompressedDigest as a layer ID, allowing it to use the traditional layer and image IDs. + // + // This is, sadly, quite costly: Up to now we might have only had to write, and digest, only the new/modified files. + // Here we need to read, and digest, the whole layer, even if almost all of it was already present locally previously. + // So, really specialized (EXTREMELY RARE) users can opt out of this check using insecureAllowUnpredictableImageContents. + // + // Layers without a tar-split (estargz layers and old zstd:chunked layers) can't produce an UncompressedDigest that + // matches the expected RootFS.DiffID; we always fall back to full pulls, again unless the user opts out + // via insecureAllowUnpredictableImageContents. + if output.UncompressedDigest == "" { + switch { + case c.pullOptions.insecureAllowUnpredictableImageContents: + // Oh well. Skip the costly digest computation. + case output.TarSplit != nil: + if _, err := output.TarSplit.Seek(0, io.SeekStart); err != nil { + return output, err + } + metadata := tsStorage.NewJSONUnpacker(output.TarSplit) + fg := newStagedFileGetter(dirFile, flatPathNameMap) + digester := digest.Canonical.Digester() + if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil { + return output, fmt.Errorf("digesting staged uncompressed stream: %w", err) + } + output.UncompressedDigest = digester.Digest() + default: + // We are checking for this earlier in NewDiffer, so this should not be reachable.
+ return output, fmt.Errorf(`internal error: layer's UncompressedDigest is unknown and "insecure_allow_unpredictable_image_contents" is not set`) + } + } + if totalChunksSize > 0 { logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests + // on success steal the reference to the tarSplit file + c.tarSplit = nil + return output, nil } -func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { +func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool { // ignore the metadata files for the estargz format. if fileType != fileTypeEstargz { return false @@ -1631,7 +1914,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { return false } -func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) { +func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []minimal.FileMetadata) ([]fileMetadata, error) { countNextChunks := func(start int) int { count := 0 for _, e := range entries[start:] { @@ -1668,7 +1951,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if e.Type == TypeReg { nChunks := countNextChunks(i + 1) - e.chunks = make([]*internal.FileMetadata, nChunks+1) + e.chunks = make([]*minimal.FileMetadata, nChunks+1) for j := 0; j <= nChunks; j++ { // we need a copy here, otherwise we override the // .Size later @@ -1703,7 +1986,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the // same digest as chunk.ChunkDigest -func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { +func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0) if err != nil { return false @@ -1716,7 +1999,7 @@ func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offs } defer fd.Close() - if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil { + if _, err := fd.Seek(offset, io.SeekStart); err != nil { return false } @@ -1734,3 +2017,33 @@ func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offs return digester.Digest() == digest } + +// newStagedFileGetter returns an object usable as storage.FileGetter for rootDir. +// if flatPathNameMap is not nil, it must be used to map logical file names into the backing file paths. 
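As a hedged illustration of how such a getter is consumed (the getter itself is defined just below), mirroring the UncompressedDigest computation in ApplyDiff above; recomputeDiffID and its parameters are invented names, not part of this patch:

    package chunked // illustrative placement inside pkg/chunked, where newStagedFileGetter is visible

    import (
        "io"
        "os"

        digest "github.com/opencontainers/go-digest"
        "github.com/vbatts/tar-split/tar/asm"
        tsStorage "github.com/vbatts/tar-split/tar/storage"
    )

    func recomputeDiffID(tarSplit io.Reader, rootDir *os.File, flatMap map[string]string) (digest.Digest, error) {
        up := tsStorage.NewJSONUnpacker(tarSplit)   // the tar-split stream carries the tar framing
        fg := newStagedFileGetter(rootDir, flatMap) // file contents come from the staged layer
        digester := digest.Canonical.Digester()
        if err := asm.WriteOutputTarStream(fg, up, digester.Hash()); err != nil {
            return "", err
        }
        return digester.Digest(), nil
    }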
+func newStagedFileGetter(rootDir *os.File, flatPathNameMap map[string]string) *stagedFileGetter { + return &stagedFileGetter{ + rootDir: rootDir, + flatPathNameMap: flatPathNameMap, + } +} + +type stagedFileGetter struct { + rootDir *os.File + flatPathNameMap map[string]string // nil, or a map from filepath.Clean()ed tar file names to expected on-filesystem names +} + +func (fg *stagedFileGetter) Get(filename string) (io.ReadCloser, error) { + if fg.flatPathNameMap != nil { + path, ok := fg.flatPathNameMap[filepath.Clean(filename)] + if !ok { + return nil, fmt.Errorf("no path mapping exists for tar entry %q", filename) + } + filename = path + } + pathFD, err := securejoin.OpenatInRoot(fg.rootDir, filename) + if err != nil { + return nil, err + } + defer pathFD.Close() + return securejoin.Reopen(pathFD, unix.O_RDONLY) +}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index 7be0adeb..7e73a145 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -11,7 +11,8 @@ import ( digest "github.com/opencontainers/go-digest" ) -// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. -func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { - return nil, errors.New("format not supported on this system") +// NewDiffer returns a differ that can be used with [Store.PrepareStagedLayer]. +// The caller must call Close() on the returned Differ. +func NewDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { + return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("format not supported on this system")) }
diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go index 6fbaa41b..6f39b2ae 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go +++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
@@ -3,7 +3,7 @@ package toc import ( "errors" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" digest "github.com/opencontainers/go-digest" )
@@ -19,7 +19,7 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" // This is an experimental feature and may be changed/removed in the future. func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { d1, ok1 := annotations[tocJSONDigestAnnotation] - d2, ok2 := annotations[internal.ManifestChecksumKey] + d2, ok2 := annotations[minimal.ManifestChecksumKey] switch { case ok1 && ok2: return nil, errors.New("both zstd:chunked and eStargz TOC found")
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 08060f2a..9855abd1 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
@@ -42,13 +42,11 @@ func Usage(dir string) (usage *DiskUsage, err error) { // Check inode to only count the sizes of files with multiple hard links once.
inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { + if _, exists := data[inode]; exists { return nil } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} + data[inode] = struct{}{} // Ignore directory sizes if entry.IsDir() { return nil diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go index 785b1331..04cfafcd 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go @@ -13,7 +13,7 @@ import ( func Exists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } @@ -25,7 +25,7 @@ func Exists(path string) error { func Lexists(path string) error { // It uses unix.Faccessat which is a faster operation compared to os.Stat for // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS) if err != nil { return &os.PathError{Op: "faccessat", Path: path, Err: err} } diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go new file mode 100644 index 00000000..9f5c6c90 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go @@ -0,0 +1,20 @@ +package fileutils + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). +func ReflinkOrCopy(src, dst *os.File) error { + err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd())) + if err == nil { + return nil + } + + _, err = io.Copy(dst, src) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go new file mode 100644 index 00000000..c0a30e67 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go @@ -0,0 +1,15 @@ +//go:build !linux + +package fileutils + +import ( + "io" + "os" +) + +// ReflinkOrCopy attempts to reflink the source to the destination fd. +// If reflinking fails or is unsupported, it falls back to io.Copy(). 
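A hypothetical caller of either ReflinkOrCopy variant; the paths are placeholders:

    package main

    import (
        "log"
        "os"

        "github.com/containers/storage/pkg/fileutils"
    )

    func main() {
        src, err := os.Open("/tmp/layer.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer src.Close()
        dst, err := os.Create("/tmp/layer-copy.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer dst.Close()
        // On Linux this issues a FICLONE ioctl and silently falls back to a
        // byte-by-byte copy; on other platforms it is always a byte copy.
        if err := fileutils.ReflinkOrCopy(src, dst); err != nil {
            log.Fatal(err)
        }
    }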
+func ReflinkOrCopy(src, dst *os.File) error { + _, err := io.Copy(dst, src) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go index db31da76..fac9a2f8 100644 --- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go @@ -25,7 +25,7 @@ func CreateIDMappedMount(source, target string, pid int) error { } defer userNsFile.Close() - targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE) + targetDirFd, err := unix.OpenTree(unix.AT_FDCWD, source, unix.OPEN_TREE_CLONE) if err != nil { return &os.PathError{Op: "open_tree", Path: source, Err: err} } @@ -33,8 +33,9 @@ func CreateIDMappedMount(source, target string, pid int) error { if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, &unix.MountAttr{ - Attr_set: unix.MOUNT_ATTR_IDMAP, - Userns_fd: uint64(userNsFile.Fd()), + Attr_set: unix.MOUNT_ATTR_IDMAP, + Userns_fd: uint64(userNsFile.Fd()), + Propagation: unix.MS_PRIVATE, }); err != nil { return &os.PathError{Op: "mount_setattr", Path: source, Err: err} } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 248594e9..13277f09 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -4,6 +4,7 @@ import ( "bufio" "errors" "fmt" + "io/fs" "os" "os/user" "runtime" @@ -369,75 +370,172 @@ func checkChownErr(err error, name string, uid, gid int) error { // Stat contains file states that can be overridden with ContainersOverrideXattr. type Stat struct { - IDs IDPair - Mode os.FileMode + IDs IDPair + Mode os.FileMode + Major int + Minor int } // FormatContainersOverrideXattr will format the given uid, gid, and mode into a string // that can be used as the value for the ContainersOverrideXattr xattr. func FormatContainersOverrideXattr(uid, gid, mode int) string { - return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777) + return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0) +} + +// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string +// that can be used as the value for the ContainersOverrideXattr xattr. For devices, it also +// needs the major and minor numbers. +func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string { + typ := "" + switch mode & os.ModeType { + case os.ModeDir: + typ = "dir" + case os.ModeSymlink: + typ = "symlink" + case os.ModeNamedPipe: + typ = "pipe" + case os.ModeSocket: + typ = "socket" + case os.ModeDevice: + typ = fmt.Sprintf("block-%d-%d", major, minor) + case os.ModeDevice | os.ModeCharDevice: + typ = fmt.Sprintf("char-%d-%d", major, minor) + default: + typ = "file" + } + unixMode := mode & os.ModePerm + if mode&os.ModeSetuid != 0 { + unixMode |= 0o4000 + } + if mode&os.ModeSetgid != 0 { + unixMode |= 0o2000 + } + if mode&os.ModeSticky != 0 { + unixMode |= 0o1000 + } + return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ) } // GetContainersOverrideXattr will get and decode ContainersOverrideXattr. 
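A worked example of the four-field "uid:gid:mode:type" encoding produced by the formatters above; the values are illustrative:

    package main

    import (
        "fmt"
        "io/fs"
        "os"

        "github.com/containers/storage/pkg/idtools"
    )

    func main() {
        // A character device 1:3 owned by root with mode 0600:
        fmt.Println(idtools.FormatContainersOverrideXattrDevice(
            0, 0, fs.FileMode(0o600)|os.ModeDevice|os.ModeCharDevice, 1, 3))
        // Output: 0:0:0600:char-1-3

        // A plain file keeps the legacy three fields plus the new "file" suffix:
        fmt.Println(idtools.FormatContainersOverrideXattr(1000, 1000, 0o755))
        // Output: 1000:1000:0755:file
    }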
func GetContainersOverrideXattr(path string) (Stat, error) { - var stat Stat xstat, err := system.Lgetxattr(path, ContainersOverrideXattr) if err != nil { - return stat, err + return Stat{}, err } + return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist. +} +func parseOverrideXattr(xstat []byte) (Stat, error) { + var stat Stat attrs := strings.Split(string(xstat), ":") - if len(attrs) != 3 { - return stat, fmt.Errorf("The number of clons in %s does not equal to 3", + if len(attrs) < 3 { + return stat, fmt.Errorf("the number of parts in %s is less than 3", ContainersOverrideXattr) } value, err := strconv.ParseUint(attrs[0], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse UID: %w", err) + return stat, fmt.Errorf("failed to parse UID: %w", err) } - stat.IDs.UID = int(value) - value, err = strconv.ParseUint(attrs[0], 10, 32) + value, err = strconv.ParseUint(attrs[1], 10, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse GID: %w", err) + return stat, fmt.Errorf("failed to parse GID: %w", err) } - stat.IDs.GID = int(value) value, err = strconv.ParseUint(attrs[2], 8, 32) if err != nil { - return stat, fmt.Errorf("Failed to parse mode: %w", err) + return stat, fmt.Errorf("failed to parse mode: %w", err) + } + stat.Mode = os.FileMode(value) & os.ModePerm + if value&0o1000 != 0 { + stat.Mode |= os.ModeSticky + } + if value&0o2000 != 0 { + stat.Mode |= os.ModeSetgid + } + if value&0o4000 != 0 { + stat.Mode |= os.ModeSetuid } - stat.Mode = os.FileMode(value) - + if len(attrs) > 3 { + typ := attrs[3] + if strings.HasPrefix(typ, "file") { + } else if strings.HasPrefix(typ, "dir") { + stat.Mode |= os.ModeDir + } else if strings.HasPrefix(typ, "symlink") { + stat.Mode |= os.ModeSymlink + } else if strings.HasPrefix(typ, "pipe") { + stat.Mode |= os.ModeNamedPipe + } else if strings.HasPrefix(typ, "socket") { + stat.Mode |= os.ModeSocket + } else if strings.HasPrefix(typ, "block") { + stat.Mode |= os.ModeDevice + stat.Major, stat.Minor, err = parseDevice(typ) + if err != nil { + return stat, err + } + } else if strings.HasPrefix(typ, "char") { + stat.Mode |= os.ModeDevice | os.ModeCharDevice + stat.Major, stat.Minor, err = parseDevice(typ) + if err != nil { + return stat, err + } + } else { + return stat, fmt.Errorf("invalid file type %s", typ) + } + } return stat, nil } +func parseDevice(typ string) (int, int, error) { + parts := strings.Split(typ, "-") + // If there are more than 3 parts, just ignore them to be forward compatible + if len(parts) < 3 { + return 0, 0, fmt.Errorf("invalid device type %s", typ) + } + if parts[0] != "block" && parts[0] != "char" { + return 0, 0, fmt.Errorf("invalid device type %s", typ) + } + major, err := strconv.Atoi(parts[1]) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse major number: %w", err) + } + minor, err := strconv.Atoi(parts[2]) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse minor number: %w", err) + } + return major, minor, nil +} + // SetContainersOverrideXattr will encode and set ContainersOverrideXattr. 
func SetContainersOverrideXattr(path string, stat Stat) error { - value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode)) + value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor) return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0) } func SafeChown(name string, uid, gid int) error { if runtime.GOOS == "darwin" { - var mode os.FileMode = 0o0700 + stat := Stat{ + Mode: os.FileMode(0o0700), + } xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil { - attrs := strings.Split(string(xstat), ":") - if len(attrs) == 3 { - val, err := strconv.ParseUint(attrs[2], 8, 32) - if err == nil { - mode = os.FileMode(val) - } + if err == nil && xstat != nil { + stat, err = parseOverrideXattr(xstat) + if err != nil { + return err + } + } else { + st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. + if err != nil { + return err } + stat.Mode = st.Mode() } - value := Stat{IDPair{uid, gid}, mode} - if err = SetContainersOverrideXattr(name, value); err != nil { + stat.IDs = IDPair{UID: uid, GID: gid} + if err = SetContainersOverrideXattr(name, stat); err != nil { return err } uid = os.Getuid() @@ -453,19 +551,24 @@ func SafeChown(name string, uid, gid int) error { func SafeLchown(name string, uid, gid int) error { if runtime.GOOS == "darwin" { - var mode os.FileMode = 0o0700 + stat := Stat{ + Mode: os.FileMode(0o0700), + } xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil { - attrs := strings.Split(string(xstat), ":") - if len(attrs) == 3 { - val, err := strconv.ParseUint(attrs[2], 8, 32) - if err == nil { - mode = os.FileMode(val) - } + if err == nil && xstat != nil { + stat, err = parseOverrideXattr(xstat) + if err != nil { + return err + } + } else { + st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. 
+ if err != nil { + return err } + stat.Mode = st.Mode() } - value := Stat{IDPair{uid, gid}, mode} - if err = SetContainersOverrideXattr(name, value); err != nil { + stat.IDs = IDPair{UID: uid, GID: gid} + if err = SetContainersOverrideXattr(name, stat); err != nil { return err } uid = os.Getuid() diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index 2bd26d0e..9a17f570 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -5,6 +5,7 @@ package idtools import ( "errors" "os/user" + "sync" "unsafe" ) @@ -13,16 +14,14 @@ import ( #include #include #include -const char *Prog = "storage"; -FILE *shadow_logfd = NULL; struct subid_range get_range(struct subid_range *ranges, int i) { - shadow_logfd = stderr; - return ranges[i]; + return ranges[i]; } #if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_init libsubid_init # define subid_get_uid_ranges get_subuid_ranges # define subid_get_gid_ranges get_subgid_ranges #endif @@ -30,6 +29,8 @@ struct subid_range get_range(struct subid_range *ranges, int i) */ import "C" +var onceInit sync.Once + func readSubid(username string, isUser bool) (ranges, error) { var ret ranges uidstr := "" @@ -42,6 +43,10 @@ func readSubid(username string, isUser bool) (ranges, error) { uidstr = u.Uid } + onceInit.Do(func() { + C.subid_init(C.CString("storage"), C.stderr) + }) + cUsername := C.CString(username) defer C.free(unsafe.Pointer(cUsername)) diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go index 72a04f34..cf605803 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go @@ -93,10 +93,7 @@ loop0: } // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } + nextCap := min(b.Cap()*2, maxCap) bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() @@ -178,7 +175,7 @@ func getBuffer(size int) *fixedBuffer { bufPoolsLock.Lock() pool, ok := bufPools[size] if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }} bufPools[size] = pool } bufPoolsLock.Unlock() diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go index ccc7f9c2..0b6d0a7a 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/writers.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go @@ -36,9 +36,9 @@ func (r *writeCloserWrapper) Close() error { } // NewWriteCloserWrapper returns a new io.WriteCloser. 
-func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { +func NewWriteCloserWrapper(w io.Writer, closer func() error) io.WriteCloser { return &writeCloserWrapper{ - Writer: r, + Writer: w, closer: closer, } } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index 52f6c0a6..dfe81c24 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -6,6 +6,8 @@ import ( "path/filepath" "sync" "time" + + "github.com/containers/storage/internal/rawfilelock" ) // A Locker represents a file lock where the file is used to cache an @@ -55,13 +57,6 @@ type Locker interface { AssertLockedForWriting() } -type lockType byte - -const ( - readLock lockType = iota - writeLock -) - // LockFile represents a file lock where the file is used to cache an // identifier of the last party that made changes to whatever's being protected // by the lock. @@ -79,12 +74,12 @@ type LockFile struct { stateMutex *sync.Mutex counter int64 lw LastWrite // A global value valid as of the last .Touch() or .Modified() - lockType lockType + lockType rawfilelock.LockType locked bool // The following fields are only modified on transitions between counter == 0 / counter != 0. // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. // In other cases, they need to be protected using stateMutex. - fd fileHandle + fd rawfilelock.FileHandle } var ( @@ -129,12 +124,12 @@ func (l *LockFile) Lock() { if l.ro { panic("can't take write lock on read-only lock file") } - l.lock(writeLock) + l.lock(rawfilelock.WriteLock) } // RLock locks the lockfile as a reader. func (l *LockFile) RLock() { - l.lock(readLock) + l.lock(rawfilelock.ReadLock) } // TryLock attempts to lock the lockfile as a writer. Panic if the lock is a read-only one. @@ -142,12 +137,12 @@ func (l *LockFile) TryLock() error { if l.ro { panic("can't take write lock on read-only lock file") } - return l.tryLock(writeLock) + return l.tryLock(rawfilelock.WriteLock) } // TryRLock attempts to lock the lockfile as a reader. func (l *LockFile) TryRLock() error { - return l.tryLock(readLock) + return l.tryLock(rawfilelock.ReadLock) } // Unlock unlocks the lockfile. @@ -172,9 +167,9 @@ func (l *LockFile) Unlock() { l.locked = false // Close the file descriptor on the last unlock, releasing the // file lock. - unlockAndCloseHandle(l.fd) + rawfilelock.UnlockAndCloseHandle(l.fd) } - if l.lockType == readLock { + if l.lockType == rawfilelock.ReadLock { l.rwMutex.RUnlock() } else { l.rwMutex.Unlock() @@ -206,7 +201,7 @@ func (l *LockFile) AssertLockedForWriting() { l.AssertLocked() // Like AssertLocked, don’t even bother with l.stateMutex. - if l.lockType == readLock { + if l.lockType == rawfilelock.ReadLock { panic("internal error: lock is not held for writing") } } @@ -273,7 +268,7 @@ func (l *LockFile) Touch() error { return err } l.stateMutex.Lock() - if !l.locked || (l.lockType == readLock) { + if !l.locked || (l.lockType == rawfilelock.ReadLock) { panic("attempted to update last-writer in lockfile without the write lock") } defer l.stateMutex.Unlock() @@ -324,6 +319,24 @@ func getLockfile(path string, ro bool) (*LockFile, error) { return lockFile, nil } +// openLock opens a lock file at the specified path, creating the parent directory if it does not exist. 
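The same create-parent-and-retry shape as the openLock below, sketched with plain os calls rather than the lockfile internals (openCreatingParent is an invented name):

    package lockutil

    import (
        "os"
        "path/filepath"
    )

    func openCreatingParent(path string) (*os.File, error) {
        f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
        if err == nil {
            return f, nil
        }
        if os.IsNotExist(err) { // the parent directory is missing: create it, then retry once
            if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
                return nil, err
            }
            return os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
        }
        return nil, err
    }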
+func openLock(path string, readOnly bool) (rawfilelock.FileHandle, error) { + fd, err := rawfilelock.OpenLock(path, readOnly) + if err == nil { + return fd, nil + } + + // the directory of the lockfile seems to be removed, try to create it + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return fd, fmt.Errorf("creating lock file directory: %w", err) + } + + return openLock(path, readOnly) + } + return fd, &os.PathError{Op: "open", Path: path, Err: err} +} + // createLockFileForPath returns new *LockFile object, possibly (depending on the platform) // working inter-process and associated with the specified path. // @@ -343,11 +356,11 @@ func createLockFileForPath(path string, ro bool) (*LockFile, error) { if err != nil { return nil, err } - unlockAndCloseHandle(fd) + rawfilelock.UnlockAndCloseHandle(fd) - lType := writeLock + lType := rawfilelock.WriteLock if ro { - lType = readLock + lType = rawfilelock.ReadLock } return &LockFile{ @@ -362,40 +375,10 @@ func createLockFileForPath(path string, ro bool) (*LockFile, error) { }, nil } -// openLock opens the file at path and returns the corresponding file -// descriptor. The path is opened either read-only or read-write, -// depending on the value of ro argument. -// -// openLock will create the file and its parent directories, -// if necessary. -func openLock(path string, ro bool) (fd fileHandle, err error) { - flags := os.O_CREATE - if ro { - flags |= os.O_RDONLY - } else { - flags |= os.O_RDWR - } - fd, err = openHandle(path, flags) - if err == nil { - return fd, nil - } - - // the directory of the lockfile seems to be removed, try to create it - if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { - return fd, fmt.Errorf("creating lock file directory: %w", err) - } - - return openLock(path, ro) - } - - return fd, &os.PathError{Op: "open", Path: path, Err: err} -} - // lock locks the lockfile via syscall based on the specified type and // command. -func (l *LockFile) lock(lType lockType) { - if lType == readLock { +func (l *LockFile) lock(lType rawfilelock.LockType) { + if lType == rawfilelock.ReadLock { l.rwMutex.RLock() } else { l.rwMutex.Lock() @@ -413,7 +396,7 @@ func (l *LockFile) lock(lType lockType) { // Optimization: only use the (expensive) syscall when // the counter is 0. In this case, we're either the first // reader lock or a writer lock. - if err := lockHandle(l.fd, lType, false); err != nil { + if err := rawfilelock.LockFile(l.fd, lType); err != nil { panic(err) } } @@ -424,10 +407,10 @@ func (l *LockFile) lock(lType lockType) { // lock locks the lockfile via syscall based on the specified type and // command. -func (l *LockFile) tryLock(lType lockType) error { +func (l *LockFile) tryLock(lType rawfilelock.LockType) error { var success bool var rwMutexUnlocker func() - if lType == readLock { + if lType == rawfilelock.ReadLock { success = l.rwMutex.TryRLock() rwMutexUnlocker = l.rwMutex.RUnlock } else { @@ -451,8 +434,8 @@ func (l *LockFile) tryLock(lType lockType) error { // Optimization: only use the (expensive) syscall when // the counter is 0. In this case, we're either the first // reader lock or a writer lock. 
- if err = lockHandle(l.fd, lType, true); err != nil { - closeHandle(fd) + if err = rawfilelock.TryLockFile(l.fd, lType); err != nil { + rawfilelock.CloseHandle(fd) rwMutexUnlocker() return err } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 885f2f88..14c27c51 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -9,8 +9,6 @@ import ( "golang.org/x/sys/unix" ) -type fileHandle uintptr - // GetLastWrite returns a LastWrite value corresponding to current state of the lock. // This is typically called before (_not after_) loading the state when initializing a consumer // of the data protected by the lock. @@ -66,41 +64,3 @@ func (l *LockFile) TouchedSince(when time.Time) bool { touched := time.Unix(mtim.Unix()) return when.Before(touched) } - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= unix.O_CLOEXEC - fd, err := unix.Open(path, mode, 0o644) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error { - fType := unix.F_RDLCK - if lType != readLock { - fType = unix.F_WRLCK - } - lk := unix.Flock_t{ - Type: int16(fType), - Whence: int16(unix.SEEK_SET), - Start: 0, - Len: 0, - } - cmd := unix.F_SETLKW - if nonblocking { - cmd = unix.F_SETLK - } - for { - err := unix.FcntlFlock(uintptr(fd), cmd, &lk) - if err == nil || nonblocking { - return err - } - time.Sleep(10 * time.Millisecond) - } -} - -func unlockAndCloseHandle(fd fileHandle) { - unix.Close(int(fd)) -} - -func closeHandle(fd fileHandle) { - unix.Close(int(fd)) -} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index 0cc1c50c..e66f7bfb 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -14,8 +14,6 @@ const ( allBytes = ^uint32(0) ) -type fileHandle windows.Handle - // GetLastWrite returns a LastWrite value corresponding to current state of the lock. // This is typically called before (_not after_) loading the state when initializing a consumer // of the data protected by the lock. 
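A usage sketch of the extracted primitives, inferred only from the call sites visible in this patch; internal/rawfilelock cannot be imported from outside containers/storage, so this is illustrative rather than a supported API:

    import "github.com/containers/storage/internal/rawfilelock" // internal: only usable within containers/storage itself

    func withWriteLock(path string, critical func() error) error {
        fd, err := rawfilelock.OpenLock(path, false) // false selects read-write mode
        if err != nil {
            return err
        }
        if err := rawfilelock.TryLockFile(fd, rawfilelock.WriteLock); err != nil { // non-blocking
            rawfilelock.CloseHandle(fd)
            return err
        }
        defer rawfilelock.UnlockAndCloseHandle(fd)
        return critical()
    }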
@@ -73,37 +71,3 @@ func (l *LockFile) TouchedSince(when time.Time) bool { } return when.Before(stat.ModTime()) } - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= windows.O_CLOEXEC - fd, err := windows.Open(path, mode, windows.S_IWRITE) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error { - flags := 0 - if lType != readLock { - flags = windows.LOCKFILE_EXCLUSIVE_LOCK - } - if nonblocking { - flags |= windows.LOCKFILE_FAIL_IMMEDIATELY - } - ol := new(windows.Overlapped) - if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { - if nonblocking { - return err - } - panic(err) - } - return nil -} - -func unlockAndCloseHandle(fd fileHandle) { - ol := new(windows.Overlapped) - windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) - closeHandle(fd) -} - -func closeHandle(fd fileHandle) { - windows.Close(windows.Handle(fd)) -}
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index 4ad63451..d5f5d638 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -1,4 +1,4 @@ -//go:build linux && cgo +//go:build linux package loopback
@@ -16,8 +16,8 @@ import ( // Loopback related errors var ( ErrAttachLoopbackDevice = errors.New("loopback attach failed") - ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") - ErrSetCapacity = errors.New("Unable set loopback capacity") + ErrGetLoopbackBackingFile = errors.New("unable to get loopback backing file") + ErrSetCapacity = errors.New("unable to set loopback capacity") ) func stringToLoopName(src string) [LoNameSize]uint8 {
@@ -113,7 +113,7 @@ func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (*os.File logrus.Errorf("Getting loopback backing file: %s", err) return nil, ErrGetLoopbackBackingFile } - if dev != uint64(st.Dev) || ino != st.Ino { + if dev != uint64(st.Dev) || ino != st.Ino { //nolint:unconvert logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) } return loopFile, nil
diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go index cf77e523..9acc519c 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
@@ -1,4 +1,4 @@ -//go:build linux && cgo +//go:build linux package loopback
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go index 2a4f5cbc..e004c7fe 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
@@ -1,20 +1,10 @@ -//go:build linux && cgo +//go:build linux package loopback -/* -#include // FIXME: present only for defines, maybe we can remove it?
- -#ifndef LOOP_CTL_GET_FREE - #define LOOP_CTL_GET_FREE 0x4C82 -#endif - -#ifndef LO_FLAGS_PARTSCAN - #define LO_FLAGS_PARTSCAN 8 -#endif - -*/ -import "C" +import ( + "golang.org/x/sys/unix" +) type loopInfo64 struct { loDevice uint64 /* ioctl r/o */ @@ -34,19 +24,19 @@ type loopInfo64 struct { // IOCTL consts const ( - LoopSetFd = C.LOOP_SET_FD - LoopCtlGetFree = C.LOOP_CTL_GET_FREE - LoopGetStatus64 = C.LOOP_GET_STATUS64 - LoopSetStatus64 = C.LOOP_SET_STATUS64 - LoopClrFd = C.LOOP_CLR_FD - LoopSetCapacity = C.LOOP_SET_CAPACITY + LoopSetFd = unix.LOOP_SET_FD + LoopCtlGetFree = unix.LOOP_CTL_GET_FREE + LoopGetStatus64 = unix.LOOP_GET_STATUS64 + LoopSetStatus64 = unix.LOOP_SET_STATUS64 + LoopClrFd = unix.LOOP_CLR_FD + LoopSetCapacity = unix.LOOP_SET_CAPACITY ) // LOOP consts. const ( - LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR - LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY - LoFlagsPartScan = C.LO_FLAGS_PARTSCAN - LoKeySize = C.LO_KEY_SIZE - LoNameSize = C.LO_NAME_SIZE + LoFlagsAutoClear = unix.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = unix.LO_FLAGS_READ_ONLY + LoFlagsPartScan = unix.LO_FLAGS_PARTSCAN + LoKeySize = unix.LO_KEY_SIZE + LoNameSize = unix.LO_NAME_SIZE ) diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go index 2d882127..f773e11d 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -1,4 +1,4 @@ -//go:build linux && cgo +//go:build linux package loopback @@ -36,7 +36,7 @@ func FindLoopDeviceFor(file *os.File) *os.File { return nil } targetInode := stat.Sys().(*syscall.Stat_t).Ino - targetDevice := stat.Sys().(*syscall.Stat_t).Dev + targetDevice := uint64(stat.Sys().(*syscall.Stat_t).Dev) //nolint:unconvert for i := 0; true; i++ { path := fmt.Sprintf("/dev/loop%d", i) @@ -53,7 +53,7 @@ func FindLoopDeviceFor(file *os.File) *os.File { } dev, inode, err := getLoopbackBackingFile(file) - if err == nil && dev == uint64(targetDevice) && inode == targetInode { + if err == nil && dev == targetDevice && inode == targetInode { return file } file.Close() diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go index a15e3688..e68648d1 100644 --- a/vendor/github.com/containers/storage/pkg/pools/pools.go +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -40,7 +40,7 @@ func init() { // added here to be shared where required. func newBufioReaderPoolWithSize(size int) *BufioReaderPool { pool := &sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + New: func() any { return bufio.NewReaderSize(nil, size) }, } return &BufioReaderPool{pool: pool} } @@ -87,7 +87,7 @@ type BufioWriterPool struct { // added here to be shared where required. 
func newBufioWriterPoolWithSize(size int) *BufioWriterPool { pool := &sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + New: func() any { return bufio.NewWriterSize(nil, size) }, } return &BufioWriterPool{pool: pool} } diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index 0c032e6c..a1938cd4 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -49,7 +49,7 @@ func panicIfNotInitialized() { } } -func naiveSelf() string { //nolint: unused +func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go index f63c3e44..303a7291 100644 --- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -24,7 +24,7 @@ func GenerateRandomASCIIString(n int) string { "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " res := make([]byte, n) - for i := 0; i < n; i++ { + for i := range n { res[i] = chars[rand.IntN(len(chars))] } return string(res) @@ -83,7 +83,7 @@ func quote(word string, buf *bytes.Buffer) { buf.WriteString("'") - for i := 0; i < len(word); i++ { + for i := range len(word) { b := word[i] if b == '\'' { // Replace literal ' with a close ', a \', and an open ' diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go new file mode 100644 index 00000000..1314058f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go @@ -0,0 +1,93 @@ +//go:build freebsd + +package system + +import ( + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + EXTATTR_NAMESPACE_EMPTY = unix.EXTATTR_NAMESPACE_EMPTY + EXTATTR_NAMESPACE_USER = unix.EXTATTR_NAMESPACE_USER + EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM +) + +// ExtattrGetLink retrieves the value of the extended attribute identified by attrname +// in the given namespace and associated with the given path in the file system. +// If the path is a symbolic link, the extended attribute is retrieved from the link itself. +// Returns a []byte slice if the extattr is set and nil otherwise. +func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { + size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname, + uintptr(unsafe.Pointer(nil)), 0) + if errno != nil { + if errno == unix.ENOATTR { + return nil, nil + } + return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} + } + if size == 0 { + return []byte{}, nil + } + + dest := make([]byte, size) + size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname, + uintptr(unsafe.Pointer(&dest[0])), size) + if errno != nil { + return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} + } + + return dest[:size], nil +} + +// ExtattrSetLink sets the value of extended attribute identified by attrname +// in the given namespace and associated with the given path in the file system. +// If the path is a symbolic link, the extended attribute is set on the link itself. 
+func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { + if len(data) == 0 { + data = []byte{} // ensure non-nil for empty data + } + if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname, + uintptr(unsafe.Pointer(&data[0])), len(data)); errno != nil { + return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno} + } + + return nil +} + +// ExtattrListLink lists extended attributes associated with the given path +// in the specified namespace. If the path is a symbolic link, the attributes +// are listed from the link itself. +func ExtattrListLink(path string, attrnamespace int) ([]string, error) { + size, errno := unix.ExtattrListLink(path, attrnamespace, + uintptr(unsafe.Pointer(nil)), 0) + if errno != nil { + return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} + } + if size == 0 { + return []string{}, nil + } + + dest := make([]byte, size) + size, errno = unix.ExtattrListLink(path, attrnamespace, + uintptr(unsafe.Pointer(&dest[0])), size) + if errno != nil { + return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} + } + + var attrs []string + for i := 0; i < size; { + // Each attribute is preceded by a single byte length + length := int(dest[i]) + i++ + if i+length > size { + break + } + attrs = append(attrs, string(dest[i:i+length])) + i += length + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go new file mode 100644 index 00000000..07b67357 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go @@ -0,0 +1,24 @@ +//go:build !freebsd + +package system + +const ( + EXTATTR_NAMESPACE_EMPTY = 0 + EXTATTR_NAMESPACE_USER = 0 + EXTATTR_NAMESPACE_SYSTEM = 0 +) + +// ExtattrGetLink is not supported on platforms other than FreeBSD. +func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// ExtattrSetLink is not supported on platforms other than FreeBSD. +func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { + return ErrNotSupportedPlatform +} + +// ExtattrListLink is not supported on platforms other than FreeBSD. 
+func ExtattrListLink(path string, attrnamespace int) ([]string, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index e3d13463..0dee88d1 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -9,9 +9,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: uint64(s.Rdev), + rdev: uint64(s.Rdev), //nolint:unconvert mtim: s.Mtim, - dev: uint64(s.Dev), + dev: uint64(s.Dev), //nolint:unconvert }, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go new file mode 100644 index 00000000..715f05b9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go index 75275b96..27ada208 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go @@ -12,7 +12,7 @@ const ( E2BIG unix.Errno = unix.E2BIG // Operation not supported - EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + ENOTSUP unix.Errno = unix.ENOTSUP ) // Lgetxattr retrieves the value of the extended attribute identified by attr diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go b/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go new file mode 100644 index 00000000..5d653976 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go @@ -0,0 +1,85 @@ +package system + +import ( + "strings" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + ENOTSUP unix.Errno = unix.ENOTSUP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW +) + +var ( + namespaceMap = map[string]int{ + "user": EXTATTR_NAMESPACE_USER, + "system": EXTATTR_NAMESPACE_SYSTEM, + } +) + +func xattrToExtattr(xattr string) (namespace int, extattr string, err error) { + namespaceName, extattr, found := strings.Cut(xattr, ".") + if !found { + return -1, "", ENOTSUP + } + + namespace, ok := namespaceMap[namespaceName] + if !ok { + return -1, "", ENOTSUP + } + return namespace, extattr, nil +} + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. +func Lgetxattr(path string, attr string) ([]byte, error) { + namespace, extattr, err := xattrToExtattr(attr) + if err != nil { + return nil, err + } + return ExtattrGetLink(path, namespace, extattr) +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. 
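A hypothetical round trip through these shims; the path and attribute name are placeholders, and on FreeBSD the "user." prefix is mapped to EXTATTR_NAMESPACE_USER by the xattrToExtattr helper above:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        // Passing flags != 0 would return ENOTSUP on FreeBSD (see the FIXME below).
        if err := system.Lsetxattr("/tmp/f", "user.example", []byte("v"), 0); err != nil {
            panic(err)
        }
        v, err := system.Lgetxattr("/tmp/f", "user.example")
        fmt.Println(string(v), err)
    }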
+func Lsetxattr(path string, attr string, value []byte, flags int) error { + if flags != 0 { + // FIXME: Flags are not supported on FreeBSD, but we can implement + // them mimicking the behavior of the Linux implementation. + // See lsetxattr(2) on Linux for more information. + return ENOTSUP + } + + namespace, extattr, err := xattrToExtattr(attr) + if err != nil { + return err + } + return ExtattrSetLink(path, namespace, extattr, value) +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system. +func Llistxattr(path string) ([]string, error) { + attrs := []string{} + + for namespaceName, namespace := range namespaceMap { + namespaceAttrs, err := ExtattrListLink(path, namespace) + if err != nil { + return nil, err + } + + for _, attr := range namespaceAttrs { + attrs = append(attrs, namespaceName+"."+attr) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 6b47c4e7..12462cca 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -12,7 +12,7 @@ const ( E2BIG unix.Errno = unix.E2BIG // Operation not supported - EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + ENOTSUP unix.Errno = unix.ENOTSUP // Value is too small or too large for maximum size allowed EOVERFLOW unix.Errno = unix.EOVERFLOW diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index 0ab61f3d..66bf5858 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -1,4 +1,4 @@ -//go:build !linux && !darwin +//go:build !linux && !darwin && !freebsd package system @@ -9,7 +9,7 @@ const ( E2BIG syscall.Errno = syscall.Errno(0) // Operation not supported - EOPNOTSUPP syscall.Errno = syscall.Errno(0) + ENOTSUP syscall.Errno = syscall.Errno(0) // Value is too small or too large for maximum size allowed EOVERFLOW syscall.Errno = syscall.Errno(0) diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index b45a6819..9e0e562d 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -32,9 +32,9 @@ type Cmd struct { *exec.Cmd UnshareFlags int UseNewuidmap bool - UidMappings []specs.LinuxIDMapping // nolint: revive,golint + UidMappings []specs.LinuxIDMapping //nolint: revive UseNewgidmap bool - GidMappings []specs.LinuxIDMapping // nolint: revive,golint + GidMappings []specs.LinuxIDMapping //nolint: revive GidMappingsEnableSetgroups bool Setsid bool Setpgrp bool @@ -98,7 +98,7 @@ func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error return cap.Get(capability.EFFECTIVE, capid), nil } -func (c *Cmd) Start() error { +func (c *Cmd) Start() (retErr error) { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -167,6 +167,15 @@ func (c *Cmd) Start() error { return err } + // If the function fails from here, we need to make sure the + // child process is killed and properly cleaned up. + defer func() { + if retErr != nil { + _ = c.Cmd.Process.Kill() + _ = c.Cmd.Wait() + } + }() + // Close the ends of the pipes that the parent doesn't need. 
continueRead.Close() continueRead = nil @@ -240,7 +249,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newgidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...) g.Reset() cmd.Stdout = g cmd.Stderr = g @@ -258,7 +267,7 @@ func (c *Cmd) Start() error { } logrus.Warnf("Falling back to single mapping") g.Reset() - g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) + fmt.Fprintf(g, "0 %d 1\n", os.Getegid()) } } if !gidmapSet { @@ -300,7 +309,7 @@ func (c *Cmd) Start() error { if err != nil { return fmt.Errorf("finding newuidmap: %w", err) } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...) u.Reset() cmd.Stdout = u cmd.Stderr = u @@ -319,7 +328,7 @@ func (c *Cmd) Start() error { logrus.Warnf("Falling back to single mapping") u.Reset() - u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) + fmt.Fprintf(u, "0 %d 1\n", os.Geteuid()) } } if !uidmapSet { @@ -459,7 +468,7 @@ type Runnable interface { Run() error } -func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname +func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname if err != nil { if format != "" { logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 96232523..2fff0cec 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -80,6 +80,25 @@ additionalimagestores = [ # This is a "string bool": "false" | "true" (cannot be native TOML boolean) # convert_images = "false" +# This should ALMOST NEVER be set. +# It allows partial pulls of images without guaranteeing that "partial +# pulls" and non-partial pulls both result in consistent image contents. +# This allows pulling estargz images and early versions of zstd:chunked images; +# otherwise, these layers always use the traditional non-partial pull path. +# +# This option should be enabled EXTREMELY rarely, only if ALL images that could +# EVER be conceivably pulled on this system are GUARANTEED (e.g. using a signature policy) +# to come from a build system trusted to never attack image integrity. +# +# If this consistency enforcement were disabled, malicious images could be built +# in a way designed to evade other audit mechanisms, so presence of most other audit +# mechanisms is not a replacement for the above-mentioned need for all images to come +# from a trusted build system. +# +# As a side effect, enabling this option will also make image IDs unpredictable +# (usually not equal to the traditional value matching the config digest). +# insecure_allow_unpredictable_image_contents = "false" + # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID # ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned # to containers configured to create automatically a user namespace. 
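The DeleteLayer, DeleteImage, and DeleteContainer hunks in store.go just below all adopt the same shape: a named return value plus a deferred errors.Join, so that a failed temporary-directory cleanup is reported without masking the primary error. A minimal, self-contained sketch of that pattern, with illustrative names and only the standard library assumed (not part of the patch):

package main

import (
	"errors"
	"fmt"
	"os"
)

// deleteWithCleanup mirrors the deferred-cleanup shape used in the store.go
// hunks: the deferred closure reads and rewrites the named return value
// retErr, joining any cleanup failure with whatever error the body produced.
func deleteWithCleanup(path string) (retErr error) {
	defer func() {
		if cleanupErr := cleanupScratch(); cleanupErr != nil {
			retErr = errors.Join(cleanupErr, retErr)
		}
	}()
	return os.Remove(path)
}

// cleanupScratch stands in for tempdir.CleanupTemporaryDirectories(...).
func cleanupScratch() error { return nil }

func main() {
	// errors.Join(nil, err) returns err unchanged, so a successful cleanup
	// leaves the primary result alone.
	fmt.Println(deleteWithCleanup("/nonexistent/path"))
}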
Containers diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index a2d38500..10f4d56a 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -18,8 +18,11 @@ import ( // register all of the built-in drivers _ "github.com/containers/storage/drivers/register" + "golang.org/x/sync/errgroup" drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/dedup" + "github.com/containers/storage/internal/tempdir" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" @@ -29,7 +32,6 @@ import ( "github.com/containers/storage/pkg/stringutils" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" - "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -161,11 +163,31 @@ type flaggableStore interface { ClearFlag(id string, flag string) error // SetFlag sets a named flag and its value on an item in the store. - SetFlag(id string, flag string, value interface{}) error + SetFlag(id string, flag string, value any) error } type StoreOptions = types.StoreOptions +type DedupHashMethod = dedup.DedupHashMethod + +const ( + DedupHashInvalid = dedup.DedupHashInvalid + DedupHashCRC = dedup.DedupHashCRC + DedupHashFileSize = dedup.DedupHashFileSize + DedupHashSHA256 = dedup.DedupHashSHA256 +) + +type ( + DedupOptions = dedup.DedupOptions + DedupResult = dedup.DedupResult +) + +// DedupArgs is used to pass arguments to the Dedup command. +type DedupArgs struct { + // Options that are passed directly to the internal/dedup.DedupDirs function. + Options DedupOptions +} + // Store wraps up the various types of file-based stores that we use into a // singleton object that initializes and manages them all together. type Store interface { @@ -341,15 +363,11 @@ type Store interface { // } ApplyDiff(to string, diff io.Reader) (int64, error) - // ApplyDiffWithDiffer applies a diff to a layer. - // It is the caller responsibility to clean the staging directory if it is not - // successfully applied with ApplyStagedLayer. - // Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release - ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) - // PrepareStagedLayer applies a diff to a layer. // It is the caller responsibility to clean the staging directory if it is not // successfully applied with ApplyStagedLayer. + // The caller must ensure [Store.ApplyStagedLayer] or [Store.CleanupStagedLayer] is called eventually + // with the returned [drivers.DriverWithDifferOutput] object. PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) // ApplyStagedLayer combines the functions of creating a layer and using the staging @@ -589,6 +607,9 @@ type Store interface { // MultiList returns consistent values as of a single point in time. // WARNING: The values may already be out of date by the time they are returned to the caller. MultiList(MultiListOptions) (MultiListResult, error) + + // Dedup deduplicates layers in the store. 
+ Dedup(DedupArgs) (drivers.DedupResult, error) } // AdditionalLayer represents a layer that is contained in the additional layer store @@ -648,7 +669,7 @@ type LayerOptions struct { // Flags is a set of named flags and their values to store with the layer. // Currently these can only be set when the layer record is created, but that // could change in the future. - Flags map[string]interface{} + Flags map[string]any } type LayerBigDataOption struct { @@ -676,7 +697,7 @@ type ImageOptions struct { NamesHistory []string // Flags is a set of named flags and their values to store with the image. Currently these can only // be set when the image record is created, but that could change in the future. - Flags map[string]interface{} + Flags map[string]any } type ImageBigDataOption struct { @@ -696,7 +717,7 @@ type ContainerOptions struct { // Flags is a set of named flags and their values to store with the container. // Currently these can only be set when the container record is created, but that // could change in the future. - Flags map[string]interface{} + Flags map[string]any MountOpts []string Volatile bool StorageOpt map[string]string @@ -1425,16 +1446,7 @@ func (s *store) writeToAllStores(fn func(rlstore rwLayerStore) error) error { // On entry: // - rlstore must be locked for writing func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { - if !s.graphDriver.SupportsShifting() { - return false - } - if uidmap != nil && !idtools.IsContiguous(uidmap) { - return false - } - if gidmap != nil && !idtools.IsContiguous(gidmap) { - return false - } - return true + return s.graphDriver.SupportsShifting(uidmap, gidmap) } // On entry: @@ -1625,7 +1637,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...) options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...) if options.Flags == nil { - options.Flags = make(map[string]interface{}) + options.Flags = make(map[string]any) } maps.Copy(options.Flags, iOptions.Flags) } @@ -1747,7 +1759,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst } // By construction, createMappedLayer can only be true if ristore == s.imageStore. 
if err = s.imageStore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { - if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { + if err2 := rlstore.deleteWhileHoldingLock(mappedLayer.ID); err2 != nil { err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err) } return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err) @@ -1894,7 +1906,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } } if options.Flags == nil { - options.Flags = make(map[string]interface{}) + options.Flags = make(map[string]any) } plabel, _ := options.Flags[processLabelFlag].(string) mlabel, _ := options.Flags[mountLabelFlag].(string) @@ -1932,7 +1944,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } container, err := s.containerStore.create(id, names, imageID, layer, &options) if err != nil || container == nil { - if err2 := rlstore.Delete(layer); err2 != nil { + if err2 := rlstore.deleteWhileHoldingLock(layer); err2 != nil { if err == nil { err = fmt.Errorf("deleting layer %#v: %w", layer, err2) } else { @@ -2529,7 +2541,13 @@ func (s *store) Lookup(name string) (string, error) { return "", ErrLayerUnknown } -func (s *store) DeleteLayer(id string) error { +func (s *store) DeleteLayer(id string) (retErr error) { + cleanupFunctions := []tempdir.CleanupTempDirFunc{} + defer func() { + if cleanupErr := tempdir.CleanupTemporaryDirectories(cleanupFunctions...); cleanupErr != nil { + retErr = errors.Join(cleanupErr, retErr) + } + }() return s.writeToAllStores(func(rlstore rwLayerStore) error { if rlstore.Exists(id) { if l, err := rlstore.Get(id); err != nil { @@ -2563,7 +2581,9 @@ func (s *store) DeleteLayer(id string) error { return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer) } } - if err := rlstore.Delete(id); err != nil { + cf, err := rlstore.deferredDelete(id) + cleanupFunctions = append(cleanupFunctions, cf...) + if err != nil { return fmt.Errorf("delete layer %v: %w", id, err) } @@ -2580,8 +2600,14 @@ func (s *store) DeleteLayer(id string) error { }) } -func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { +func (s *store) DeleteImage(id string, commit bool) (layers []string, retErr error) { layersToRemove := []string{} + cleanupFunctions := []tempdir.CleanupTempDirFunc{} + defer func() { + if cleanupErr := tempdir.CleanupTemporaryDirectories(cleanupFunctions...); cleanupErr != nil { + retErr = errors.Join(cleanupErr, retErr) + } + }() if err := s.writeToAllStores(func(rlstore rwLayerStore) error { // Delete image from all available imagestores configured to be used. imageFound := false @@ -2687,7 +2713,9 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) } if commit { for _, layer := range layersToRemove { - if err = rlstore.Delete(layer); err != nil { + cf, err := rlstore.deferredDelete(layer) + cleanupFunctions = append(cleanupFunctions, cf...) 
+ if err != nil { return err } } @@ -2699,7 +2727,13 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) return layersToRemove, nil } -func (s *store) DeleteContainer(id string) error { +func (s *store) DeleteContainer(id string) (retErr error) { + cleanupFunctions := []tempdir.CleanupTempDirFunc{} + defer func() { + if cleanupErr := tempdir.CleanupTemporaryDirectories(cleanupFunctions...); cleanupErr != nil { + retErr = errors.Join(cleanupErr, retErr) + } + }() return s.writeToAllStores(func(rlstore rwLayerStore) error { if !s.containerStore.Exists(id) { return ErrNotAContainer @@ -2715,12 +2749,14 @@ func (s *store) DeleteContainer(id string) error { // the container record that refers to it, effectively losing // track of it if rlstore.Exists(container.LayerID) { - if err := rlstore.Delete(container.LayerID); err != nil { + cf, err := rlstore.deferredDelete(container.LayerID) + cleanupFunctions = append(cleanupFunctions, cf...) + if err != nil { return err } } - var wg multierror.Group + var wg errgroup.Group middleDir := s.graphDriverName + "-containers" @@ -2735,18 +2771,26 @@ func (s *store) DeleteContainer(id string) error { }) if multierr := wg.Wait(); multierr != nil { - return multierr.ErrorOrNil() + return multierr } return s.containerStore.Delete(id) }) } -func (s *store) Delete(id string) error { +func (s *store) Delete(id string) (retErr error) { + cleanupFunctions := []tempdir.CleanupTempDirFunc{} + defer func() { + if cleanupErr := tempdir.CleanupTemporaryDirectories(cleanupFunctions...); cleanupErr != nil { + retErr = errors.Join(cleanupErr, retErr) + } + }() return s.writeToAllStores(func(rlstore rwLayerStore) error { if s.containerStore.Exists(id) { if container, err := s.containerStore.Get(id); err == nil { if rlstore.Exists(container.LayerID) { - if err = rlstore.Delete(container.LayerID); err != nil { + cf, err := rlstore.deferredDelete(container.LayerID) + cleanupFunctions = append(cleanupFunctions, cf...) + if err != nil { return err } if err = s.containerStore.Delete(id); err != nil { @@ -2770,7 +2814,9 @@ func (s *store) Delete(id string) error { return s.imageStore.Delete(id) } if rlstore.Exists(id) { - return rlstore.Delete(id) + cf, err := rlstore.deferredDelete(id) + cleanupFunctions = append(cleanupFunctions, cf...) + return err } return ErrLayerUnknown }) @@ -2806,7 +2852,11 @@ func (s *store) Version() ([][2]string, error) { return [][2]string{}, nil } -func (s *store) mount(id string, options drivers.MountOpts) (string, error) { +func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) { + if err := validateMountOptions(mountOpts); err != nil { + return "", err + } + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver // in startUsingGraphDriver(). 
if err := s.startUsingGraphDriver(); err != nil { @@ -2818,57 +2868,61 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) { if err != nil { return "", err } - if options.UidMaps != nil || options.GidMaps != nil { - options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps) - } + var imageHomeStore roImageStore - // function used to have a scope for rlstore.StopWriting() - tryMount := func() (string, error) { - if err := rlstore.startWriting(); err != nil { + if err := rlstore.startWriting(); err != nil { + return "", err + } + defer rlstore.stopWriting() + for _, s := range lstores { + if err := s.startReading(); err != nil { return "", err } - defer rlstore.stopWriting() - if rlstore.Exists(id) { - return rlstore.Mount(id, options) - } - return "", nil + defer s.stopReading() } - mountPoint, err := tryMount() - if mountPoint != "" || err != nil { - return mountPoint, err + if err := s.imageStore.startWriting(); err != nil { + return "", err } + defer s.imageStore.stopWriting() - // check if the layer is in a read-only store, and return a better error message - for _, store := range lstores { - if err := store.startReading(); err != nil { - return "", err - } - exists := store.Exists(id) - store.stopReading() - if exists { - return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly) + cimage, err := s.imageStore.Get(id) + if err == nil { + imageHomeStore = s.imageStore + } else { + for _, s := range s.roImageStores { + if err := s.startReading(); err != nil { + return "", err + } + defer s.stopReading() + cimage, err = s.Get(id) + if err == nil { + imageHomeStore = s + break + } } } + if cimage == nil { + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) + } - return "", ErrLayerUnknown -} - -func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) { - // Append ReadOnly option to mountOptions - img, err := s.Image(id) + idmappingsOpts := types.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + } + ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, rlstore, lstores, idmappingsOpts) if err != nil { return "", err } - if err := validateMountOptions(mountOpts); err != nil { - return "", err + if len(ilayer.UIDMap) > 0 || len(ilayer.GIDMap) > 0 { + return "", fmt.Errorf("cannot create an image with canonical UID/GID mappings in a read-only store") } + options := drivers.MountOpts{ MountLabel: mountLabel, Options: append(mountOpts, "ro"), } - - return s.mount(img.TopLayer, options) + return rlstore.Mount(ilayer.ID, options) } func (s *store) Mount(id, mountLabel string) (string, error) { @@ -2890,7 +2944,43 @@ func (s *store) Mount(id, mountLabel string) (string, error) { } } } - return s.mount(id, options) + + // We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver + // in startUsingGraphDriver(). 
+ if err := s.startUsingGraphDriver(); err != nil { + return "", err + } + defer s.stopUsingGraphDriver() + + rlstore, lstores, err := s.bothLayerStoreKindsLocked() + if err != nil { + return "", err + } + if options.UidMaps != nil || options.GidMaps != nil { + options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps) + } + + if err := rlstore.startWriting(); err != nil { + return "", err + } + defer rlstore.stopWriting() + if rlstore.Exists(id) { + return rlstore.Mount(id, options) + } + + // check if the layer is in a read-only store, and return a better error message + for _, store := range lstores { + if err := store.startReading(); err != nil { + return "", err + } + exists := store.Exists(id) + store.stopReading() + if exists { + return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly) + } + } + + return "", ErrLayerUnknown } func (s *store) Mounted(id string) (int, error) { @@ -2914,7 +3004,23 @@ func (s *store) UnmountImage(id string, force bool) (bool, error) { if err != nil { return false, err } - return s.Unmount(img.TopLayer, force) + + return writeToLayerStore(s, func(lstore rwLayerStore) (bool, error) { + for _, layerID := range img.MappedTopLayers { + l, err := lstore.Get(layerID) + if err != nil { + if err == ErrLayerUnknown { + continue + } + return false, err + } + // check if the layer with the canonical mapping is in the mapped top layers + if len(l.UIDMap) == 0 && len(l.GIDMap) == 0 { + return lstore.unmount(l.ID, force, false) + } + } + return lstore.unmount(img.TopLayer, force, false) + }) } func (s *store) Unmount(id string, force bool) (bool, error) { @@ -3048,6 +3154,12 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro } func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) { + defer func() { + if args.DiffOutput.TarSplit != nil { + args.DiffOutput.TarSplit.Close() + args.DiffOutput.TarSplit = nil + } + }() rlstore, rlstores, err := s.bothLayerStoreKinds() if err != nil { return nil, err @@ -3079,6 +3191,10 @@ func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) { } func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error { + if diffOutput.TarSplit != nil { + diffOutput.TarSplit.Close() + diffOutput.TarSplit = nil + } _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target) }) @@ -3093,13 +3209,6 @@ func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, dif return rlstore.applyDiffWithDifferNoLock(options, differ) } -func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { - if to != "" { - return nil, fmt.Errorf("ApplyDiffWithDiffer does not support non-empty 'layer' parameter") - } - return s.PrepareStagedLayer(options, differ) -} - func (s *store) DifferTarget(id string) (string, error) { return writeToLayerStore(s, func(rlstore rwLayerStore) (string, error) { if rlstore.Exists(id) { @@ -3639,7 +3748,7 @@ func makeBigDataBaseName(key string) string { if err != nil || size != 1 { break } - if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') { + if ch != '.' && (ch < '0' || ch > '9') && (ch < 'a' || ch > 'z') { break } } @@ -3679,11 +3788,10 @@ func copyMapPreferringNil[K comparable, V any](m map[K]V) map[K]V { // newMapFrom returns a shallow clone of map m. 
// If m is empty, an empty map is allocated and returned. func newMapFrom[K comparable, V any](m map[K]V) map[K]V { - ret := make(map[K]V, len(m)) - for k, v := range m { - ret[k] = v + if len(m) == 0 { + return make(map[K]V, 0) } - return ret + return maps.Clone(m) } func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption { @@ -3843,3 +3951,43 @@ func (s *store) MultiList(options MultiListOptions) (MultiListResult, error) { } return out, nil } + +// Dedup deduplicates layers in the store. +func (s *store) Dedup(req DedupArgs) (drivers.DedupResult, error) { + imgs, err := s.Images() + if err != nil { + return drivers.DedupResult{}, err + } + var topLayers []string + for _, i := range imgs { + topLayers = append(topLayers, i.TopLayer) + topLayers = append(topLayers, i.MappedTopLayers...) + } + return writeToLayerStore(s, func(rlstore rwLayerStore) (drivers.DedupResult, error) { + layers := make(map[string]struct{}) + for _, i := range topLayers { + cur := i + for cur != "" { + if _, visited := layers[cur]; visited { + break + } + l, err := rlstore.Get(cur) + if err != nil { + if err == ErrLayerUnknown { + break + } + return drivers.DedupResult{}, err + } + layers[cur] = struct{}{} + cur = l.Parent + } + } + r := drivers.DedupArgs{ + Options: req.Options, + } + for l := range layers { + r.Layers = append(r.Layers, l) + } + return rlstore.dedup(r) + }) +} diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index efc08c47..451a3f6d 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -160,19 +160,17 @@ func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) { defaultRootlessGraphRoot = storageOpts.GraphRoot storageOpts = StoreOptions{} reloadConfigurationFileIfNeeded(storageConf, &storageOpts) - if usePerUserStorage() { - // If the file did not specify a graphroot or runroot, - // set sane defaults so we don't try and use root-owned - // directories - if storageOpts.RunRoot == "" { - storageOpts.RunRoot = defaultRootlessRunRoot - } - if storageOpts.GraphRoot == "" { - if storageOpts.RootlessStoragePath != "" { - storageOpts.GraphRoot = storageOpts.RootlessStoragePath - } else { - storageOpts.GraphRoot = defaultRootlessGraphRoot - } + // If the file did not specify a graphroot or runroot, + // set sane defaults so we don't try and use root-owned + // directories + if storageOpts.RunRoot == "" { + storageOpts.RunRoot = defaultRootlessRunRoot + } + if storageOpts.GraphRoot == "" { + if storageOpts.RootlessStoragePath != "" { + storageOpts.GraphRoot = storageOpts.RootlessStoragePath + } else { + storageOpts.GraphRoot = defaultRootlessGraphRoot } } } @@ -394,7 +392,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio } mtime := fi.ModTime() - if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile { + if prevReloadConfig.storeOptions != nil && mtime.Equal(prevReloadConfig.mod) && prevReloadConfig.configFile == configFile { *storeOptions = *prevReloadConfig.storeOptions return nil } diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 73fcd240..1a8c463e 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -14,7 +14,7 @@ import ( func expandEnvPath(path string, rootlessUID 
int) (string, error) { var err error - path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1) + path = strings.ReplaceAll(path, "$UID", strconv.Itoa(rootlessUID)) path = os.ExpandEnv(path) newpath, err := filepath.EvalSymlinks(path) if err != nil { @@ -61,7 +61,7 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio } mtime := fi.ModTime() - if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile { + if prevReloadConfig.storeOptions != nil && mtime.Equal(prevReloadConfig.mod) && prevReloadConfig.configFile == configFile { *storeOptions = *prevReloadConfig.storeOptions return } diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 09919394..117a732c 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -202,7 +202,7 @@ outer: return 0, err } defer func() { - if err2 := rlstore.Delete(clayer.ID); err2 != nil { + if err2 := rlstore.deleteWhileHoldingLock(clayer.ID); err2 != nil { if retErr == nil { retErr = fmt.Errorf("deleting temporary layer %#v: %w", clayer.ID, err2) } else { @@ -276,10 +276,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rl // bigger than s.autoNsMaxSize. // This is a best effort heuristic. if requestedSize == 0 { - size = initialSize - if s.autoNsMinSize > size { - size = s.autoNsMinSize - } + size = max(s.autoNsMinSize, initialSize) if image != nil { sizeFromImage, err := s.getMaxSizeFromImage(image, rlstore, lstores, options.PasswdFile, options.GroupFile) if err != nil { diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index 04b5685a..ca0e3c62 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -6,6 +6,80 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ## +## [0.4.1] - 2025-01-28 ## + +### Fixed ### +- The restrictions added for `root` paths passed to `SecureJoin` in 0.4.0 was + found to be too strict and caused some regressions when folks tried to + update, so this restriction has been relaxed to only return an error if the + path contains a `..` component. We still recommend users use `filepath.Clean` + (and even `filepath.EvalSymlinks`) on the `root` path they are using, but at + least you will no longer be punished for "trivial" unclean paths. + +## [0.4.0] - 2025-01-13 ## + +### Breaking #### +- `SecureJoin(VFS)` will now return an error if the provided `root` is not a + `filepath.Clean`'d path. + + While it is ultimately the responsibility of the caller to ensure the root is + a safe path to use, passing a path like `/symlink/..` as a root would result + in the `SecureJoin`'d path being placed in `/` even though `/symlink/..` + might be a different directory, and so we should more strongly discourage + such usage. + + All major users of `securejoin.SecureJoin` already ensure that the paths they + provide are safe (and this is ultimately a question of user error), but + removing this foot-gun is probably a good idea. Of course, this is + necessarily a breaking API change (though we expect no real users to be + affected by it). + + Thanks to [Erik Sjölund](https://github.com/eriksjolund), who initially + reported this issue as a possible security issue. 
+ +- `MkdirAll` and `MkdirHandle` now take an `os.FileMode`-style mode argument + instead of a raw `unix.S_*`-style mode argument, which may cause compile-time + type errors depending on how you use `filepath-securejoin`. For most users, + there will be no change in behaviour aside from the type change (as the + bottom `0o777` bits are the same in both formats, and most users are probably + only using those bits). + + However, if you were using `unix.S_ISVTX` to set the sticky bit with + `MkdirAll(Handle)` you will need to switch to `os.ModeSticky` otherwise you + will get a runtime error with this update. In addition, the error message you + will get from passing `unix.S_ISUID` and `unix.S_ISGID` will be different as + they are treated as invalid bits now (note that previously passing said bits + was also an error). + +## [0.3.6] - 2024-12-17 ## + +### Compatibility ### +- The minimum Go version requirement for `filepath-securejoin` is now Go 1.18 + (we use generics internally). + + For reference, `filepath-securejoin@v0.3.0` somewhat-arbitrarily bumped the + Go version requirement to 1.21. + + While we did make some use of Go 1.21 stdlib features (and in principle Go + versions <= 1.21 are no longer even supported by upstream anymore), some + downstreams have complained that the version bump has meant that they have to + do workarounds when backporting fixes that use the new `filepath-securejoin` + API onto old branches. This is not an ideal situation, but since using this + library is probably better for most downstreams than a hand-rolled + workaround, we now have compatibility shims that allow us to build on older + Go versions. +- Lower minimum version requirement for `golang.org/x/sys` to `v0.18.0` (we + need the wrappers for `fsconfig(2)`), which should also make backporting + patches to older branches easier. + +## [0.3.5] - 2024-12-06 ## + +### Fixed ### +- `MkdirAll` will now no longer return an `EEXIST` error if two racing + processes are creating the same directory. We will still verify that the path + is a directory, but this will avoid spurious errors when multiple threads or + programs are trying to `MkdirAll` the same path. opencontainers/runc#4543 + ## [0.3.4] - 2024-10-09 ## ### Fixed ### @@ -164,8 +238,12 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). 
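As a sketch of what the 0.4.0 mode-argument change described above means for callers: the new signature (visible in the mkdir_linux.go hunk further down, func MkdirAll(root, unsafePath string, mode os.FileMode) error, a Linux-only API) takes os.FileMode values, so the sticky bit is spelled os.ModeSticky rather than unix.S_ISVTX. The root and subpath below are invented for illustration:

package main

import (
	"log"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Pre-0.4.0 this call took a raw int mode and would have been written
	// as 0o1777; with os.FileMode the sticky bit is a named flag, and
	// set[ug]id bits are rejected either way.
	if err := securejoin.MkdirAll("/srv/rootfs", "var/tmp", 0o777|os.ModeSticky); err != nil {
		log.Fatal(err)
	}
}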
-[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...HEAD -[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4 +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD +[0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 +[0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0 +[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6 +[0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5 +[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4 [0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.2...v0.3.3 [0.3.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...v0.3.2 [0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 42045aca..267577d4 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.3.4 +0.4.1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go new file mode 100644 index 00000000..42452bbf --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go @@ -0,0 +1,18 @@ +//go:build linux && go1.20 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "fmt" +) + +// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) +// is only guaranteed to give you baseErr. +func wrapBaseError(baseErr, extraErr error) error { + return fmt.Errorf("%w: %w", extraErr, baseErr) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go new file mode 100644 index 00000000..e7adca3f --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go @@ -0,0 +1,38 @@ +//go:build linux && !go1.20 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "fmt" +) + +type wrappedError struct { + inner error + isError error +} + +func (err wrappedError) Is(target error) bool { + return err.isError == target +} + +func (err wrappedError) Unwrap() error { + return err.inner +} + +func (err wrappedError) Error() string { + return fmt.Sprintf("%v: %v", err.isError, err.inner) +} + +// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) +// is only guaranteed to give you baseErr. 
+func wrapBaseError(baseErr, extraErr error) error { + return wrappedError{ + inner: baseErr, + isError: extraErr, + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go new file mode 100644 index 00000000..ddd6fa9a --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go @@ -0,0 +1,32 @@ +//go:build linux && go1.21 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "slices" + "sync" +) + +func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { + return slices.DeleteFunc(slice, delFn) +} + +func slices_Contains[S ~[]E, E comparable](slice S, val E) bool { + return slices.Contains(slice, val) +} + +func slices_Clone[S ~[]E, E any](slice S) S { + return slices.Clone(slice) +} + +func sync_OnceValue[T any](f func() T) func() T { + return sync.OnceValue(f) +} + +func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + return sync.OnceValues(f) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go new file mode 100644 index 00000000..f1e6fe7e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go @@ -0,0 +1,124 @@ +//go:build linux && !go1.21 + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "sync" +) + +// These are very minimal implementations of functions that appear in Go 1.21's +// stdlib, included so that we can build on older Go versions. Most are +// borrowed directly from the stdlib, and a few are modified to be "obviously +// correct" without needing to copy too many other helpers. + +// clearSlice is equivalent to the builtin clear from Go 1.21. +// Copied from the Go 1.24 stdlib implementation. +func clearSlice[S ~[]E, E any](slice S) { + var zero E + for i := range slice { + slice[i] = zero + } +} + +// Copied from the Go 1.24 stdlib implementation. +func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Copied from the Go 1.24 stdlib implementation. +func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := slices_IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Similar to the stdlib slices.Contains, except that we don't have +// slices.Index so we need to use slices.IndexFunc for this non-Func helper. +func slices_Contains[S ~[]E, E comparable](s S, v E) bool { + return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0 +} + +// Copied from the Go 1.24 stdlib implementation. +func slices_Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Copied from the Go 1.24 stdlib implementation. 
+func sync_OnceValue[T any](f func() T) func() T { + var ( + once sync.Once + valid bool + p any + result T + ) + g := func() { + defer func() { + p = recover() + if !valid { + panic(p) + } + }() + result = f() + f = nil + valid = true + } + return func() T { + once.Do(g) + if !valid { + panic(p) + } + return result + } +} + +// Copied from the Go 1.24 stdlib implementation. +func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + var ( + once sync.Once + valid bool + p any + r1 T1 + r2 T2 + ) + g := func() { + defer func() { + p = recover() + if !valid { + panic(p) + } + }() + r1, r2 = f() + f = nil + valid = true + } + return func() (T1, T2) { + once.Do(g) + if !valid { + panic(p) + } + return r1, r2 + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index e0ee3f2b..e6634d47 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -1,5 +1,5 @@ // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. +// Copyright (C) 2017-2025 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -24,6 +24,31 @@ func IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) } +// errUnsafeRoot is returned if the user provides SecureJoinVFS with a path +// that contains ".." components. +var errUnsafeRoot = errors.New("root path provided to SecureJoin contains '..' components") + +// stripVolume just gets rid of the Windows volume included in a path. Based on +// some godbolt tests, the Go compiler is smart enough to make this a no-op on +// Linux. +func stripVolume(path string) string { + return path[len(filepath.VolumeName(path)):] +} + +// hasDotDot checks if the path contains ".." components in a platform-agnostic +// way. +func hasDotDot(path string) bool { + // If we are on Windows, strip any volume letters. It turns out that + // C:..\foo may (or may not) be a valid pathname and we need to handle that + // leading "..". + path = stripVolume(path) + // Look for "/../" in the path, but we need to handle leading and trailing + // ".."s by adding separators. Doing this with filepath.Separator is ugly + // so just convert to Unix-style "/" first. + path = filepath.ToSlash(path) + return strings.Contains("/"+path+"/", "/../") +} + // SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except // that the returned path is guaranteed to be scoped inside the provided root // path (when evaluated). Any symbolic links in the path are evaluated with the @@ -46,7 +71,22 @@ func IsNotExist(err error) bool { // provided via direct input or when evaluating symlinks. Therefore: // // "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt" +// +// If the provided root is not [filepath.Clean] then an error will be returned, +// as such root paths are bordering on somewhat unsafe and using such paths is +// not best practice. We also strongly suggest that any root path is first +// fully resolved using [filepath.EvalSymlinks] or otherwise constructed to +// avoid containing symlink components. Of course, the root also *must not* be +// attacker-controlled. func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // The root path must not contain ".." 
components, otherwise when we join + // the subpath we will end up with a weird path. We could work around this + // in other ways but users shouldn't be giving us non-lexical root paths in + // the first place. + if hasDotDot(root) { + return "", errUnsafeRoot + } + // Use the os.* VFS implementation if none was specified. if vfs == nil { vfs = osVFS{} @@ -59,9 +99,10 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { linksWalked int ) for remainingPath != "" { - if v := filepath.VolumeName(remainingPath); v != "" { - remainingPath = remainingPath[len(v):] - } + // On Windows, if we managed to end up at a path referencing a volume, + // drop the volume to make sure we don't end up with broken paths or + // escaping the root volume. + remainingPath = stripVolume(remainingPath) // Get the next path component. var part string diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go index 290befa1..be81e498 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go @@ -12,7 +12,6 @@ import ( "os" "path" "path/filepath" - "slices" "strings" "golang.org/x/sys/unix" @@ -113,7 +112,7 @@ func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) erro return nil } // Split the link target and clean up any "" parts. - linkTargetParts := slices.DeleteFunc( + linkTargetParts := slices_DeleteFunc( strings.Split(linkTarget, "/"), func(part string) bool { return part == "" || part == "." }) diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go index b5f67452..a17ae3b0 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go @@ -11,7 +11,6 @@ import ( "fmt" "os" "path/filepath" - "slices" "strings" "golang.org/x/sys/unix" @@ -22,6 +21,33 @@ var ( errPossibleAttack = errors.New("possible attack detected") ) +// modePermExt is like os.ModePerm except that it also includes the set[ug]id +// and sticky bits. +const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky + +//nolint:cyclop // this function needs to handle a lot of cases +func toUnixMode(mode os.FileMode) (uint32, error) { + sysMode := uint32(mode.Perm()) + if mode&os.ModeSetuid != 0 { + sysMode |= unix.S_ISUID + } + if mode&os.ModeSetgid != 0 { + sysMode |= unix.S_ISGID + } + if mode&os.ModeSticky != 0 { + sysMode |= unix.S_ISVTX + } + // We don't allow file type bits. + if mode&os.ModeType != 0 { + return 0, fmt.Errorf("%w %+.3o (%s): type bits not permitted", errInvalidMode, mode, mode) + } + // We don't allow other unknown modes. + if mode&^modePermExt != 0 || sysMode&unix.S_IFMT != 0 { + return 0, fmt.Errorf("%w %+.3o (%s): unknown mode bits", errInvalidMode, mode, mode) + } + return sysMode, nil +} + // MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use // in two respects: // @@ -40,17 +66,17 @@ var ( // a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after // doing [MkdirAll]. If you intend to open the directory after creating it, you // should use MkdirAllHandle. -func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) { - // Make sure there are no os.FileMode bits set. 
- if mode&^0o7777 != 0 { - return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode) +func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) { + unixMode, err := toUnixMode(mode) + if err != nil { + return nil, err } // On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid // bits. We could also silently ignore them but since we have very few // users it seems more prudent to return an error so users notice that // these bits will not be set. - if mode&^0o1777 != 0 { - return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) + if unixMode&^0o1777 != 0 { + return nil, fmt.Errorf("%w for mkdir %+.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) } // Try to open as much of the path as possible. @@ -93,7 +119,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err } remainingParts := strings.Split(remainingPath, string(filepath.Separator)) - if slices.Contains(remainingParts, "..") { + if slices_Contains(remainingParts, "..") { // The path contained ".." components after the end of the "real" // components. We could try to safely resolve ".." here but that would // add a bunch of extra logic for something that it's not clear even @@ -105,9 +131,6 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath) } - // Make sure the mode doesn't have any type bits. - mode &^= unix.S_IFMT - // Create the remaining components. for _, part := range remainingParts { switch part { @@ -119,11 +142,20 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely // create the final component without worrying about symlink-exchange // attacks. - if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil { + // + // If we get -EEXIST, it's possible that another program created the + // directory at the same time as us. In that case, just continue on as + // if we created it (if the created inode is not a directory, the + // following open call will fail). + if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) { err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} // Make the error a bit nicer if the directory is dead. - if err2 := isDeadInode(currentDir); err2 != nil { - err = fmt.Errorf("%w (%w)", err, err2) + if deadErr := isDeadInode(currentDir); deadErr != nil { + // TODO: Once we bump the minimum Go version to 1.20, we can use + // multiple %w verbs for this wrapping. For now we need to use a + // compatibility shim for older Go versions. + //err = fmt.Errorf("%w (%w)", err, deadErr) + err = wrapBaseError(err, deadErr) } return nil, err } @@ -188,10 +220,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err // If you plan to open the directory after you have created it or want to use // an open directory handle as the root, you should use [MkdirAllHandle] instead. // This function is a wrapper around [MkdirAllHandle]. -// -// NOTE: The mode argument must be set the unix mode bits (unix.S_I...), not -// the Go generic mode bits ([os.FileMode]...). 
-func MkdirAll(root, unsafePath string, mode int) error { +func MkdirAll(root, unsafePath string, mode os.FileMode) error { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { return err diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go index ae3b381e..f7a13e69 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go @@ -12,12 +12,11 @@ import ( "os" "path/filepath" "strings" - "sync" "golang.org/x/sys/unix" ) -var hasOpenat2 = sync.OnceValue(func() bool { +var hasOpenat2 = sync_OnceValue(func() bool { fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ Flags: unix.O_PATH | unix.O_CLOEXEC, Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go index 8cc827d7..809a579c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go @@ -12,7 +12,6 @@ import ( "os" "runtime" "strconv" - "sync" "golang.org/x/sys/unix" ) @@ -54,7 +53,7 @@ func verifyProcRoot(procRoot *os.File) error { return nil } -var hasNewMountApi = sync.OnceValue(func() bool { +var hasNewMountApi = sync_OnceValue(func() bool { // All of the pieces of the new mount API we use (fsopen, fsconfig, // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can // just check for one of the syscalls and the others should also be @@ -192,11 +191,11 @@ func doGetProcRoot() (*os.File, error) { return procRoot, err } -var getProcRoot = sync.OnceValues(func() (*os.File, error) { +var getProcRoot = sync_OnceValues(func() (*os.File, error) { return doGetProcRoot() }) -var hasProcThreadSelf = sync.OnceValue(func() bool { +var hasProcThreadSelf = sync_OnceValue(func() bool { return unix.Access("/proc/thread-self/", unix.F_OK) == nil }) @@ -265,12 +264,20 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, }) if err != nil { - return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err) + // TODO: Once we bump the minimum Go version to 1.20, we can use + // multiple %w verbs for this wrapping. For now we need to use a + // compatibility shim for older Go versions. + //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) + return nil, nil, wrapBaseError(err, errUnsafeProcfs) } } else { handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) if err != nil { - return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err) + // TODO: Once we bump the minimum Go version to 1.20, we can use + // multiple %w verbs for this wrapping. For now we need to use a + // compatibility shim for older Go versions. + //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) + return nil, nil, wrapBaseError(err, errUnsafeProcfs) } defer func() { if Err != nil { @@ -289,12 +296,17 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread return handle, runtime.UnlockOSThread, nil } -var hasStatxMountId = sync.OnceValue(func() bool { +// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to +// avoid bumping the requirement for a single constant we can just define it +// ourselves. 
+const STATX_MNT_ID_UNIQUE = 0x4000 + +var hasStatxMountId = sync_OnceValue(func() bool { var ( stx unix.Statx_t // We don't care which mount ID we get. The kernel will give us the // unique one if it is supported. - wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID + wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID ) err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) return err == nil && stx.Mask&wantStxMask != 0 @@ -310,7 +322,7 @@ func getMountId(dir *os.File, path string) (uint64, error) { stx unix.Statx_t // We don't care which mount ID we get. The kernel will give us the // unique one if it is supported. - wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID + wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID ) err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx) diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go deleted file mode 100644 index 2c3ebe16..00000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ /dev/null @@ -1,27 +0,0 @@ -package challenge - -import ( - "net/url" - "strings" -) - -// FROM: https://golang.org/src/net/http/http.go -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. -func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// FROM: http://golang.org/src/net/http/transport.go -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -// FROM: http://golang.org/src/net/http/transport.go -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go deleted file mode 100644 index fe238210..00000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ /dev/null @@ -1,237 +0,0 @@ -package challenge - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// Manager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type Manager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the - // URL which was produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. 
-	AddResponse(resp *http.Response) error
-}
-
-// NewSimpleManager returns an instance of
-// Manger which only maps endpoints to challenges
-// based on the responses which have been added the
-// manager. The simple manager will make no attempt to
-// perform requests on the endpoints or cache the responses
-// to a backend.
-func NewSimpleManager() Manager {
-	return &simpleManager{
-		Challenges: make(map[string][]Challenge),
-	}
-}
-
-type simpleManager struct {
-	sync.RWMutex
-	Challenges map[string][]Challenge
-}
-
-func normalizeURL(endpoint *url.URL) {
-	endpoint.Host = strings.ToLower(endpoint.Host)
-	endpoint.Host = canonicalAddr(endpoint)
-}
-
-func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
-	normalizeURL(&endpoint)
-
-	m.RLock()
-	defer m.RUnlock()
-	challenges := m.Challenges[endpoint.String()]
-	return challenges, nil
-}
-
-func (m *simpleManager) AddResponse(resp *http.Response) error {
-	challenges := ResponseChallenges(resp)
-	if resp.Request == nil {
-		return fmt.Errorf("missing request reference")
-	}
-	urlCopy := url.URL{
-		Path:   resp.Request.URL.Path,
-		Host:   resp.Request.URL.Host,
-		Scheme: resp.Request.URL.Scheme,
-	}
-	normalizeURL(&urlCopy)
-
-	m.Lock()
-	defer m.Unlock()
-	m.Challenges[urlCopy.String()] = challenges
-	return nil
-}
-
-// Octet types from RFC 2616.
-type octetType byte
-
-var octetTypes [256]octetType
-
-const (
-	isToken octetType = 1 << iota
-	isSpace
-)
-
-func init() {
-	// OCTET      = <any 8-bit sequence of data>
-	// CHAR       = <any US-ASCII character (octets 0 - 127)>
-	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-	// CR         = <US-ASCII CR, carriage return (13)>
-	// LF         = <US-ASCII LF, linefeed (10)>
-	// SP         = <US-ASCII SP, space (32)>
-	// HT         = <US-ASCII HT, horizontal-tab (9)>
-	// <">        = <US-ASCII double-quote mark (34)>
-	// CRLF       = CR LF
-	// LWS        = [CRLF] 1*( SP | HT )
-	// TEXT       = <any OCTET except CTLs, but including LWS>
-	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-	// token      = 1*<any CHAR except CTLs or separators>
-	// qdtext     = <any TEXT except <">>
-
-	for c := 0; c < 256; c++ {
-		var t octetType
-		isCtl := c <= 31 || c == 127
-		isChar := 0 <= c && c <= 127
-		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
-		if strings.ContainsRune(" \t\r\n", rune(c)) {
-			t |= isSpace
-		}
-		if isChar && !isCtl && !isSeparator {
-			t |= isToken
-		}
-		octetTypes[c] = t
-	}
-}
-
-// ResponseChallenges returns a list of authorization challenges
-// for the given http Response. Challenges are only checked if
-// the response status code was a 401.
-func ResponseChallenges(resp *http.Response) []Challenge {
-	if resp.StatusCode == http.StatusUnauthorized {
-		// Parse the WWW-Authenticate Header and store the challenges
-		// on this endpoint object.
- return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go index 1936234b..93863480 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -15,27 +15,30 @@ type Program interface { // ProgramFunc is a type of function that initializes programs based on arguments. type ProgramFunc func(args ...string) Program -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) +// NewShellProgramFunc creates a [ProgramFunc] to run command in a [Shell]. +func NewShellProgramFunc(command string) ProgramFunc { + return func(args ...string) Program { + return createProgramCmdRedirectErr(command, args, nil) + } } -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { +// NewShellProgramFuncWithEnv creates a [ProgramFunc] to run command +// in a [Shell] with the given environment variables. +func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc { return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + return createProgramCmdRedirectErr(command, args, env) } } -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...)
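The rewritten constructor that follows returns the *Shell wrapper directly instead of a bare *exec.Cmd; callers of the package are unaffected. A sketch of driving a credentials helper through this API, assuming a hypothetical helper binary named docker-credential-example on PATH:

package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
)

func main() {
	// The binary name is hypothetical; real helpers follow the
	// docker-credential-<suffix> naming convention.
	p := client.NewShellProgramFunc("docker-credential-example")

	// client.Get invokes `docker-credential-example get`, writes the server
	// URL to the helper's stdin, and decodes the JSON reply.
	creds, err := client.Get(p, "https://registry.example.com")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("username:", creds.Username)
}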
+func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell { + ec := exec.Command(command, args...) if env != nil { for k, v := range *env { - programCmd.Env = append(programCmd.Environ(), k+"="+v) + ec.Env = append(ec.Environ(), k+"="+v) } } - programCmd.Stderr = os.Stderr - return programCmd + ec.Stderr = os.Stderr + return &Shell{cmd: ec} } // Shell invokes shell commands to talk with a remote credentials-helper. diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 5f93eeb4..c7c64947 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -2,7 +2,10 @@ # This file lists all contributors to the repository. # See hack/generate-authors.sh to make modifications. +17neverends +7sunarni <710720732@qq.com> Aanand Prasad +Aarni Koskela Aaron Davidson Aaron Feng Aaron Hnatiw @@ -11,6 +14,7 @@ Aaron L. Xu Aaron Lehmann Aaron Welch Aaron Yoshitake +Abdur Rehman Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -24,9 +28,11 @@ Adam Avilla Adam Dobrawy Adam Eijdenberg Adam Kunk +Adam Lamers Adam Miller Adam Mills Adam Pointer +Adam Simon Adam Singer Adam Thornton Adam Walz @@ -119,6 +125,7 @@ amangoel Amen Belayneh Ameya Gawde Amir Goldstein +AmirBuddy Amit Bakshi Amit Krishnan Amit Shukla @@ -168,6 +175,7 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins +Andrés Maldonado Andy Chambers andy diller Andy Goldstein @@ -182,6 +190,7 @@ Anes Hasicic Angel Velazquez Anil Belur Anil Madhavapeddy +Anirudh Aithal Ankit Jain Ankush Agarwal Anonmily @@ -219,7 +228,8 @@ Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge -Austin Vazquez +Ashly Mathew +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -285,6 +295,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brendon Smith Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins @@ -339,12 +350,14 @@ Casey Bisson Catalin Pirvu Ce Gao Cedric Davies +Cesar Talledo Cezar Sa Espinola Chad Swenson Chance Zibolski Chander Govindarajan Chanhun Jeong Chao Wang +Charity Kathure Charles Chan Charles Hooper Charles Law @@ -366,6 +379,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chengyu Zhu Chentianze Chenyang Yan chenyuzhu @@ -480,6 +494,7 @@ Daniel Farrell Daniel Garcia Daniel Gasienica Daniel Grunwell +Daniel Guns Daniel Helfand Daniel Hiltgen Daniel J Walsh @@ -763,6 +778,7 @@ Frank Macreery Frank Rosquin Frank Villaro-Dixon Frank Yang +François Scala Fred Lifton Frederick F. Kautz IV Frederico F. de Oliveira @@ -798,6 +814,7 @@ GennadySpb Geoff Levand Geoffrey Bachelet Geon Kim +George Adams George Kontridze George Ma George MacRorie @@ -826,6 +843,7 @@ Gopikannan Venugopalsamy Gosuke Miyashita Gou Rao Govinda Fichtner +Grace Choi Grant Millar Grant Reaber Graydon Hoare @@ -966,6 +984,7 @@ James Nugent James Sanders James Turnbull James Watkins-Harvey +Jameson Hyde Jamie Hannaford Jamshid Afshar Jan Breig @@ -1064,13 +1083,16 @@ Jim Perrin Jimmy Cuadra Jimmy Puckett Jimmy Song +jinjiadu Jinsoo Park Jintao Zhang Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +jjimbo137 <115816493+jjimbo137@users.noreply.github.com> Joakim Roubert +Joan Grau Joao Fernandes Joao Trindade Joe Beda @@ -1155,6 +1177,7 @@ Josiah Kiehl José Tomás Albornoz Joyce Jang JP +JSchltggr Julian Taylor Julien Barbier Julien Bisconti @@ -1189,6 +1212,7 @@ K. 
Heller Kai Blin Kai Qiang Wu (Kennan) Kaijie Chen +Kaita Nakamura Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1263,6 +1287,7 @@ Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene +Kristian Heljas Kristina Zabunova Krystian Wojcicki Kunal Kushwaha @@ -1289,6 +1314,7 @@ Laura Brehm Laura Frank Laurent Bernaille Laurent Erignoux +Laurent Goderre Laurie Voss Leandro Motta Barros Leandro Siqueira @@ -1369,6 +1395,7 @@ Madhan Raj Mookkandy Madhav Puri Madhu Venugopal Mageee +maggie44 <64841595+maggie44@users.noreply.github.com> Mahesh Tiyyagura malnick Malte Janduda @@ -1462,6 +1489,7 @@ Matthias Kühnle Matthias Rampke Matthieu Fronton Matthieu Hauglustaine +Matthieu MOREL Mattias Jernberg Mauricio Garavaglia mauriyouth @@ -1579,6 +1607,7 @@ Muayyad Alsadi Muhammad Zohaib Aslam Mustafa Akın Muthukumar R +Myeongjoon Kim Máximo Cuadros Médi-Rémi Hashim Nace Oroz @@ -1593,6 +1622,7 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Baulch Nathan Carlson Nathan Herald Nathan Hsieh @@ -1655,6 +1685,7 @@ Nuutti Kotivuori nzwsch O.S. Tezer objectified +Octol1ttle Odin Ugedal Oguz Bilgic Oh Jinkyun @@ -1689,6 +1720,7 @@ Patrick Hemmer Patrick St. laurent Patrick Stapleton Patrik Cyvoct +Patrik Leifert pattichen Paul "TBBle" Hampson Paul @@ -1763,6 +1795,7 @@ Pierre Carrier Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE +pinglanlu Piotr Bogdan Piotr Karbowski Porjo @@ -1790,6 +1823,7 @@ Quentin Tayssier r0n22 Rachit Sharma Radostin Stoyanov +Rafael Fernández López Rafal Jeczalik Rafe Colton Raghavendra K T @@ -1845,6 +1879,7 @@ Robert Obryk Robert Schneider Robert Shade Robert Stern +Robert Sturla Robert Terhaar Robert Wallis Robert Wang @@ -1856,7 +1891,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho -Rodrigo Campos +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1995,6 +2030,7 @@ Sevki Hasirci Shane Canon Shane da Silva Shaun Kaasten +Shaun Thompson shaunol Shawn Landden Shawn Siefkas @@ -2013,6 +2049,7 @@ Shijun Qin Shishir Mahajan Shoubhik Bose Shourya Sarcar +Shreenidhi Shedi Shu-Wai Chow shuai-z Shukui Yang @@ -2100,6 +2137,7 @@ Sébastien Stormacq Sören Tempel Tabakhase Tadej Janež +Tadeusz Dudkiewicz Takuto Sato tang0th Tangi Colin @@ -2107,6 +2145,7 @@ Tatsuki Sugiura Tatsushi Inagaki Taylan Isikdemir Taylor Jones +tcpdumppy <847462026@qq.com> Ted M. Young Tehmasp Chaudhri Tejaswini Duggaraju @@ -2391,6 +2430,7 @@ You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF Youfu Zhang +YR Chen Yu Changchun Yu Chengxia Yu Peng diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 93d64cd8..702d3dca 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -1,9 +1,9 @@ -package api // import "github.com/docker/docker/api" +package api // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.47" + DefaultVersion = "1.51" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". 
Note that the daemon diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 7164e1eb..3880635d 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.47" +basePath: "/v1.51" info: title: "Docker Engine API" - version: "1.47" + version: "1.51" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,14 +55,14 @@ info: the URL is not supported by the daemon, an HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.47) is used. - For example, calling `/info` is the same as calling `/v1.47/info`. Using the + If you omit the version-prefix, the current version of the API (v1.51) is used. + For example, calling `/info` is the same as calling `/v1.51/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. - The API uses an open schema model, which means server may add extra properties + The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer @@ -212,6 +212,7 @@ definitions: - `bind` a mount of a file or directory from the host into the container. - `volume` a docker volume with the given `Name`. + - `image` a docker image. - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - `cluster` a Swarm cluster volume @@ -219,6 +220,7 @@ definitions: enum: - "bind" - "volume" + - "image" - "tmpfs" - "npipe" - "cluster" @@ -350,6 +352,7 @@ definitions: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `image` Mounts an image. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - `cluster` a Swarm cluster volume @@ -357,6 +360,7 @@ definitions: enum: - "bind" - "volume" + - "image" - "tmpfs" - "npipe" - "cluster" @@ -431,6 +435,14 @@ definitions: description: "Source path inside the volume. Must be relative without any back traversals." type: "string" example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" TmpfsOptions: description: "Optional configuration for the `tmpfs` type."
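The `image` mount type and its `ImageOptions` above are new in this API revision. A sketch of requesting such a mount through the Go client; this assumes the bumped vendor tree exposes mount.TypeImage and that the daemon negotiates a new enough API version:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Mount the filesystem of another image into the container at /data.
	resp, err := cli.ContainerCreate(context.Background(),
		&container.Config{Image: "busybox", Cmd: []string{"ls", "/data"}},
		&container.HostConfig{
			Mounts: []mount.Mount{{
				Type:   mount.TypeImage, // "image", alongside bind/volume/tmpfs
				Source: "alpine:latest", // image reference to mount
				Target: "/data",
			}},
		},
		nil, nil, "")
	if err != nil {
		panic(err)
	}
	fmt.Println("created container:", resp.ID)
}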
type: "object" @@ -953,13 +965,18 @@ definitions: ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" + example: "" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. type: "string" enum: + - "local" - "json-file" - "syslog" - "journald" @@ -970,9 +987,14 @@ definitions: - "etwlogs" - "none" Config: + description: |- + Driver-specific configuration options for the logging driver. type: "object" additionalProperties: type: "string" + example: + "max-file": "5" + "max-size": "10m" NetworkMode: type: "string" description: | @@ -1015,6 +1037,7 @@ definitions: items: type: "integer" minimum: 0 + example: [80, 64] Annotations: type: "object" description: | @@ -1117,7 +1140,8 @@ definitions: - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" - description: "Gives the container full access to the host." + description: |- + Gives the container full access to the host. PublishAllPorts: type: "boolean" description: | @@ -1174,18 +1198,20 @@ definitions: minimum: 0 Sysctls: type: "object" - description: | + x-nullable: true + description: |- A list of kernel parameters (sysctls) to set in the container. - For example: - ``` - {"net.ipv4.ip_forward": "1"} - ``` + This field is omitted if not set. additionalProperties: type: "string" + example: + "net.ipv4.ip_forward": "1" Runtime: type: "string" - description: "Runtime to use with this container." + x-nullable: true + description: |- + Runtime to use with this container. # Applicable to Windows Isolation: type: "string" @@ -1195,6 +1221,7 @@ definitions: - "default" - "process" - "hyperv" + - "" MaskedPaths: type: "array" description: | @@ -1202,6 +1229,18 @@ definitions: the default set of paths). items: type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" ReadonlyPaths: type: "array" description: | @@ -1209,6 +1248,12 @@ definitions: (this overrides the default set of paths). items: type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" ContainerConfig: description: | @@ -1225,8 +1270,14 @@ definitions: The domain name to use for the container. type: "string" User: - description: "The user that commands are run as inside the container." + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`[<:group-name|GID>]`). type: "string" + example: "123:456" AttachStdin: description: "Whether to attach to `stdin`." type: "boolean" @@ -1377,63 +1428,10 @@ definitions: when starting a container from the image. type: "object" properties: - Hostname: - description: | - The hostname to use for the container, as a valid RFC 1123 hostname. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. - type: "string" - example: "" - Domainname: - description: | - The domain name to use for the container. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. - type: "string" - example: "" User: description: "The user that commands are run as inside the container." type: "string" example: "web:web" - AttachStdin: - description: | - Whether to attach to `stdin`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - AttachStdout: - description: | - Whether to attach to `stdout`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - AttachStderr: - description: | - Whether to attach to `stderr`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false ExposedPorts: description: | An object mapping ports to an empty object in the form: @@ -1450,39 +1448,6 @@ definitions: "80/tcp": {}, "443/tcp": {} } - Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - OpenStdin: - description: | - Open `stdin` - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - StdinOnce: - description: | - Close `stdin` after one attached client disconnects. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false Env: description: | A list of environment variables to set inside the container in the @@ -1508,18 +1473,6 @@ definitions: default: false example: false x-nullable: true - Image: - description: | - The name (or reference) of the image to use when creating the container, - or which was used when the container was created. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. - type: "string" - default: "" - example: "" Volumes: description: | An object mapping mount point paths inside the container to empty @@ -1548,30 +1501,6 @@ definitions: items: type: "string" example: [] - NetworkDisabled: - description: | - Disable networking for the container. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - x-nullable: true - MacAddress: - description: | - MAC address of the container. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. - type: "string" - default: "" - example: "" - x-nullable: true OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. @@ -1594,17 +1523,6 @@ definitions: type: "string" example: "SIGTERM" x-nullable: true - StopTimeout: - description: | - Timeout to stop a container in seconds. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. - type: "integer" - default: 10 - x-nullable: true Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. @@ -1615,19 +1533,11 @@ definitions: example: ["/bin/sh", "-c"] # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. example: - "Hostname": "" - "Domainname": "" "User": "web:web" - "AttachStdin": false - "AttachStdout": false - "AttachStderr": false "ExposedPorts": { "80/tcp": {}, "443/tcp": {} } - "Tty": false - "OpenStdin": false - "StdinOnce": false "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] "Cmd": ["/bin/sh"] "Healthcheck": { @@ -1639,7 +1549,6 @@ definitions: "StartInterval": 0 } "ArgsEscaped": true - "Image": "" "Volumes": { "/app/data": {}, "/app/config": {} @@ -1917,7 +1826,7 @@ definitions: type: "string" example: "4443" - GraphDriverData: + DriverData: description: | Information about the storage driver used to store the container's and image's filesystem. @@ -1991,6 +1900,33 @@ definitions: type: "string" x-nullable: false example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" RepoTags: description: | List of image names/tags in the local image cache that reference this @@ -2107,7 +2043,7 @@ definitions: format: "int64" example: 1239828 GraphDriver: - $ref: "#/definitions/GraphDriverData" + $ref: "#/definitions/DriverData" RootFS: description: | Information about the image's RootFS, including the layer IDs. @@ -2260,8 +2196,7 @@ definitions: Number of containers using this image. Includes both stopped and running containers. - This size is not calculated by default, and depends on which API endpoint - is used. `-1` indicates that the value has not been set / calculated. + `-1` indicates that the value has not been set / calculated. x-nullable: false type: "integer" example: 2 @@ -2278,6 +2213,18 @@ definitions: x-omitempty: true items: $ref: "#/definitions/ImageManifestSummary" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. 
+ x-nullable: true + $ref: "#/definitions/OCIDescriptor" AuthConfig: type: "object" @@ -2497,6 +2444,11 @@ definitions: `overlay`). type: "string" example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + example: true EnableIPv6: description: | Whether the network was created with IPv6 enabled. @@ -2697,12 +2649,24 @@ definitions: type: "string" error: type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" aux: @@ -2802,12 +2766,24 @@ definitions: type: "string" error: type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" @@ -2816,13 +2792,44 @@ definitions: properties: error: type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + ErrorDetail: type: "object" properties: @@ -2851,9 +2858,10 @@ definitions: example: message: "Something went wrong." - IdResponse: + IDResponse: description: "Response to an API call that returns just an Id" type: "object" + x-go-name: "IDResponse" required: ["Id"] properties: Id: @@ -2898,6 +2906,17 @@ definitions: example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. 
If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "integer" + format: "int64" + example: + - 10 # Operational data NetworkID: @@ -4180,6 +4199,7 @@ definitions: - "default" - "process" - "hyperv" + - "" Init: description: | Run an init inside the container that forwards signals and reaps @@ -4984,76 +5004,346 @@ definitions: Warnings: - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - ContainerSummary: + ContainerInspectResponse: type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" properties: Id: - description: "The ID of this container" + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). type: "string" x-go-name: "ID" - Names: - description: "The names that this container has been given" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" type: "array" items: type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" Image: - description: "The name of the image used when creating this container" + description: |- + The ID (digest) of the image that this container was created from. type: "string" - ImageID: - description: "The ID of the image that this container was created from" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. type: "string" - Command: - description: "Command to run when starting the container" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. type: "string" - Created: - description: "When the container was created" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. 
+ type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. + type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. type: "array" items: - $ref: "#/definitions/Port" + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" SizeRw: - description: "The size of files that have been created or changed by this container" + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. type: "integer" format: "int64" + x-nullable: true + example: "122880" SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). 
+ type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. + Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" State: - description: "The state of this container (e.g. `Exited`)" + description: | + The state of this container. type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" Status: - description: "Additional human-readable status of this container (e.g. 
`Exit 0`)" + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) type: "string" + example: "Up 4 days" HostConfig: type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. properties: NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. type: "string" + example: "mynetwork" Annotations: - description: "Arbitrary key-value metadata attached to container" + description: |- + Arbitrary key-value metadata attached to the container. type: "object" x-nullable: true additionalProperties: type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" NetworkSettings: - description: "A summary of the container's network settings" + description: |- + Summary of the container's network settings type: "object" properties: Networks: type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" + description: |- + List of mounts used by the container. items: $ref: "#/definitions/MountPoint" @@ -5093,8 +5383,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5145,8 +5438,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5258,6 +5552,618 @@ definitions: type: "string" example: [] + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. 
+ type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). + + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. 
+ type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. + type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. when using cgroups v2. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. 
+ properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + ContainerWaitResponse: description: "OK response to ContainerWait operation" type: "object" @@ -5507,13 +6413,28 @@ definitions: type: "boolean" example: true BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. + +


+ + > **Deprecated**: netfilter module is now loaded on-demand and no longer + > during daemon startup, making this field obsolete. This field is always + > `false` and will be removed in API v1.49. type: "boolean" - example: true + example: false BridgeNfIp6tables: - description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + description: | + Indicates if `bridge-nf-call-ip6tables` is available on the host. + +


+ + > **Deprecated**: netfilter module is now loaded on-demand, and no longer + > during daemon startup, making this field obsolete. This field is always + > `false` and will be removed in API v1.49. type: "boolean" - example: true + example: false Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level @@ -5750,6 +6671,7 @@ definitions: - "default" - "hyperv" - "process" + - "" InitBinary: description: | Name and, optionally, path of the `docker-init` binary. @@ -5809,6 +6731,17 @@ description: "The network pool size" type: "integer" example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + DiscoveredDevices: + description: | + List of devices discovered by device drivers. + + Each device includes information about its source driver, kind, name, + and additional driver-specific attributes. + type: "array" + items: + $ref: "#/definitions/DeviceInfo" Warnings: description: | List of warnings / informational messages about missing features, or @@ -5820,8 +6753,6 @@ type: "string" example: - "WARNING: No memory limit support" - - "WARNING: bridge-nf-call-iptables is disabled" - - "WARNING: bridge-nf-call-ip6tables is disabled" CDISpecDirs: description: | List of directories where (Container Device Interface) CDI @@ -5894,6 +6825,37 @@ definitions: default: "plugins.moby" example: "plugins.moby" + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: @@ -5939,60 +6901,6 @@ definitions: type: "object" x-nullable: true properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. - - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - AllowNondistributableArtifactsHostnames: - description: | - List of registry hostnames to which nondistributable artifacts can be - pushed, using the format `[:]` or `[:]`. - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. - type: "array" - items: - type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax @@ -6000,7 +6908,7 @@ definitions: accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. - By default, local registries (`127.0.0.0/8`) are configured as + By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as insecure. All other registries are secure. Communicating with an insecure registry is not possible if the daemon assumes that registry is secure. @@ -6162,11 +7070,6 @@ definitions: description: "Actual commit ID of external tool." 
type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - Expected: - description: | - Commit ID of external tool expected by dockerd as set at build time. - type: "string" - example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | @@ -6335,7 +7238,7 @@ definitions: description: | The media type of the object this schema refers to. type: "string" - example: "application/vnd.docker.distribution.manifest.v2+json" + example: "application/vnd.oci.image.manifest.v1+json" digest: description: | The digest of the targeted content. @@ -6346,27 +7249,52 @@ definitions: The size in bytes of the blob. type: "integer" format: "int64" - example: 3987495 - # TODO Not yet including these fields for now, as they are nil / omitted in our response. - # urls: - # description: | - # List of URLs from which this object MAY be downloaded. - # type: "array" - # items: - # type: "string" - # format: "uri" - # annotations: - # description: | - # Arbitrary metadata relating to the targeted content. - # type: "object" - # additionalProperties: - # type: "string" - # platform: - # $ref: "#/definitions/OCIPlatform" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null OCIPlatform: type: "object" x-go-name: Platform + x-nullable: true description: | Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). 
@@ -6836,143 +7764,6 @@ paths: type: "array" items: $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.image.id: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - io.kubernetes.image.name: "ubuntu:latest" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.config.source: "api" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - 
IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] 400: description: "bad parameter" schema: @@ -7197,238 +7988,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - $ref: "#/definitions/ContainerState" - Image: - description: "The container's image ID" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - Platform: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - description: "IDs of exec instances that are running in the container." - type: "array" - items: - type: "string" - x-nullable: true - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: | - The size of files that have been created or changed by this - container. - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." - type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Healthcheck: - Test: ["CMD-SHELL", "exit 0"] - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "overlay2" - ExecIDs: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - IpcMode: "" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - 
RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" - RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - Health: - Status: "healthy" - FailingStreak: 0 - Log: - - Start: "2019-12-22T10:59:05.6385933Z" - End: "2019-12-22T10:59:05.8078452Z" - ExitCode: 0 - Output: "" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" + $ref: "#/definitions/ContainerInspectResponse" 404: description: "no such container" schema: @@ -7463,54 +8023,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "ContainerTopResponse" - description: "OK response to ContainerTop operation" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: | - Each process running in the container, where each is process - is an array of values corresponding to the titles. 
- type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" + $ref: "#/definitions/ContainerTopResponse" 404: description: "no such container" schema: @@ -7720,99 +8233,7 @@ paths: 200: description: "no error" schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 + $ref: "#/definitions/ContainerStatsResponse" 404: description: "no such container" schema: @@ -7876,10 +8297,12 @@ paths: type: "string" - name: "h" in: "query" + required: true description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" + required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] @@ -8041,14 +8464,7 @@ paths: 200: description: "The container has been updated." schema: - type: "object" - title: "ContainerUpdateResponse" - description: "OK response to ContainerUpdate operation" - properties: - Warnings: - type: "array" - items: - type: "string" + $ref: "#/definitions/ContainerUpdateResponse" 404: description: "no such container" schema: @@ -8955,10 +9371,29 @@ paths: operationId: "BuildPrune" parameters: - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.49. 
+ type: "integer" + format: "int64" + - name: "reserved-space" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" - name: "all" in: "query" type: "boolean" @@ -9025,7 +9460,13 @@ paths: parameters: - name: "fromImage" in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. type: "string" - name: "fromSrc" in: "query" @@ -9119,6 +9560,12 @@ paths: description: "Image name or id" type: "string" required: true + - name: "manifests" + in: "query" + description: "Include Manifests in the image summary." + type: "boolean" + default: false + required: false tags: ["Image"] /images/{name}/history: get: @@ -9197,6 +9644,20 @@ paths: description: "Image name or ID" type: "string" required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/{name}/push: post: @@ -9244,6 +9705,19 @@ paths: all tags of the given image that are present in the local image store are pushed. type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - name: "X-Registry-Auth" in: "header" description: | @@ -9253,11 +9727,6 @@ paths: details. type: "string" required: true - - name: "platform" - in: "query" - description: "Select a platform-specific manifest to be pushed. OCI platform (JSON encoded)" - type: "string" - x-nullable: true tags: ["Image"] /images/{name}/tag: post: @@ -9349,6 +9818,18 @@ paths: description: "Do not delete untagged parent images" type: "boolean" default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. 
+ type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" tags: ["Image"] /images/search: get: @@ -9553,7 +10034,7 @@ paths: type: "string" example: "OK" headers: - API-Version: + Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: @@ -9609,7 +10090,7 @@ paths: type: "string" example: "(empty)" headers: - API-Version: + Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: @@ -9648,7 +10129,7 @@ paths: 201: description: "no error" schema: - $ref: "#/definitions/IdResponse" + $ref: "#/definitions/IDResponse" 404: description: "no such container" schema: @@ -9906,13 +10387,9 @@ paths: ### Image tarball format - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. @@ -9942,6 +10419,16 @@ paths: description: "Image name or ID" type: "string" required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/get: get: @@ -9977,6 +10464,16 @@ paths: type: "array" items: type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/load: post: @@ -10009,6 +10506,16 @@ paths: description: "Suppress progress details during load." type: "boolean" default: false + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be load if the image is + multi-platform. + If not provided, the full multi-platform image will be loaded. 
+ + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /containers/{id}/exec: post: @@ -10023,7 +10530,7 @@ paths: 201: description: "no error" schema: - $ref: "#/definitions/IdResponse" + $ref: "#/definitions/IDResponse" 404: description: "no such container" schema: @@ -10065,6 +10572,7 @@ paths: items: type: "integer" minimum: 0 + example: [80, 64] DetachKeys: type: "string" description: | @@ -10151,9 +10659,11 @@ paths: Detach: type: "boolean" description: "Detach from the command." + example: false Tty: type: "boolean" description: "Allocate a pseudo-TTY." + example: true ConsoleSize: type: "array" description: "Initial console size, as an `[height, width]` array." @@ -10163,10 +10673,7 @@ paths: items: type: "integer" minimum: 0 - example: - Detach: false - Tty: true - ConsoleSize: [80, 64] + example: [80, 64] - name: "id" in: "path" description: "Exec instance ID" @@ -10203,10 +10710,12 @@ paths: type: "string" - name: "h" in: "query" + required: true description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" + required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] @@ -10520,6 +11029,7 @@ paths: Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" + EnableIPv4: true EnableIPv6: false Internal: false Attachable: false @@ -10541,6 +11051,7 @@ paths: Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" + EnableIPv4: false EnableIPv6: false Internal: false Attachable: false @@ -10555,6 +11066,7 @@ paths: Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "host" + EnableIPv4: false EnableIPv6: false Internal: false Attachable: false @@ -10740,6 +11252,10 @@ paths: IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true EnableIPv6: description: "Enable IPv6 on the network." 
type: "boolean" @@ -10817,6 +11333,7 @@ paths: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" MacAddress: "02:42:ac:12:05:02" + Priority: 100 tags: ["Network"] /networks/{id}/disconnect: @@ -11622,6 +12139,7 @@ paths: example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" @@ -12492,7 +13010,7 @@ paths: 201: description: "no error" schema: - $ref: "#/definitions/IdResponse" + $ref: "#/definitions/IDResponse" 409: description: "name conflicts with an existing object" schema: @@ -12699,7 +13217,7 @@ paths: 201: description: "no error" schema: - $ref: "#/definitions/IdResponse" + $ref: "#/definitions/IDResponse" 409: description: "name conflicts with an existing object" schema: diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go index bf3463b9..931ae10a 100644 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -1,4 +1,4 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" +package blkiodev import "fmt" diff --git a/vendor/github.com/docker/docker/api/types/build/build.go b/vendor/github.com/docker/docker/api/types/build/build.go new file mode 100644 index 00000000..c43a0e21 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/build/build.go @@ -0,0 +1,91 @@ +package build + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/registry" +) + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// Result contains the image id of a successful build. +type Result struct { + ID string +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*container.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]registry.AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. 
+ CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the underlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} diff --git a/vendor/github.com/docker/docker/api/types/build/cache.go b/vendor/github.com/docker/docker/api/types/build/cache.go new file mode 100644 index 00000000..42c84045 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/build/cache.go @@ -0,0 +1,52 @@ +package build + +import ( + "time" + + "github.com/docker/docker/api/types/filters" +) + +// CacheRecord contains information about a build cache record. +type CacheRecord struct { + // ID is the unique ID of the build cache record. + ID string + // Parent is the ID of the parent build cache record. + // + // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. + Parent string `json:"Parent,omitempty"` + // Parents is the list of parent build cache record IDs. + Parents []string `json:"Parents,omitempty"` + // Type is the cache record type. + Type string + // Description is a description of the build-step that produced the build cache. + Description string + // InUse indicates if the build cache is in use. + InUse bool + // Shared indicates if the build cache is shared. + Shared bool + // Size is the amount of disk space used by the build cache (in bytes). + Size int64 + // CreatedAt is the date and time at which the build cache was created. + CreatedAt time.Time + // LastUsedAt is the date and time at which the build cache was last used. + LastUsedAt *time.Time + UsageCount int +} + +// CachePruneOptions holds parameters to prune the build cache. +type CachePruneOptions struct { + All bool + ReservedSpace int64 + MaxUsedSpace int64 + MinFreeSpace int64 + Filters filters.Args + + KeepStorage int64 // Deprecated: deprecated in API 1.48. +} + +// CachePruneReport contains the response for Engine API: +// POST "/build/prune" +type CachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/docker/docker/api/types/build/disk_usage.go b/vendor/github.com/docker/docker/api/types/build/disk_usage.go new file mode 100644 index 00000000..e969b6d6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/build/disk_usage.go @@ -0,0 +1,8 @@ +package build + +// CacheDiskUsage contains disk usage for the build cache.
+type CacheDiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*CacheRecord +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index df791f02..42fe03ec 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -1,17 +1,12 @@ -package types // import "github.com/docker/docker/api/types" +package types import ( "bufio" "context" - "io" "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" ) -// NewHijackedResponse intializes a HijackedResponse type +// NewHijackedResponse initializes a [HijackedResponse] type. func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} } @@ -51,173 +46,6 @@ func (h *HijackedResponse) CloseWrite() error { return nil } -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*container.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. - BuildArgs map[string]*string - AuthConfigs map[string]registry.AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string - // Version specifies the version of the underlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. - BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. 
-type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func(context.Context) (string, error) - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - // PluginRemoveOptions holds parameters to remove plugins. type PluginRemoveOptions struct { Force bool @@ -235,22 +63,22 @@ type PluginDisableOptions struct { // PluginInstallOptions holds parameters to install a plugin. 
type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error) Args []string } -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - // PluginCreateOptions hold all options to plugin create. type PluginCreateOptions struct { RepoName string diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/common/id_response.go similarity index 87% rename from vendor/github.com/docker/docker/api/types/id_response.go rename to vendor/github.com/docker/docker/api/types/common/id_response.go index 7592d2f8..22e8c60a 100644 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ b/vendor/github.com/docker/docker/api/types/common/id_response.go @@ -1,10 +1,10 @@ -package types +package common // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse +// swagger:model IDResponse type IDResponse struct { // The id of the newly created object. diff --git a/vendor/github.com/docker/docker/api/types/container/commit.go b/vendor/github.com/docker/docker/api/types/container/commit.go new file mode 100644 index 00000000..6fd1b0ea --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/commit.go @@ -0,0 +1,7 @@ +package container + +import "github.com/docker/docker/api/types/common" + +// CommitResponse response for the commit API call, containing the ID of the +// image that was produced. 
+type CommitResponse = common.IDResponse diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index d6b03e8b..05554165 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container import ( "time" diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/docker/docker/api/types/container/container.go index 711af12c..a191ca8b 100644 --- a/vendor/github.com/docker/docker/api/types/container/container.go +++ b/vendor/github.com/docker/docker/api/types/container/container.go @@ -4,8 +4,22 @@ import ( "io" "os" "time" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/storage" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// ContainerUpdateOKBody OK response to ContainerUpdate operation +// +// Deprecated: use [UpdateResponse]. This alias will be removed in the next release. +type ContainerUpdateOKBody = UpdateResponse + +// ContainerTopOKBody OK response to ContainerTop operation +// +// Deprecated: use [TopResponse]. This alias will be removed in the next release. +type ContainerTopOKBody = TopResponse + // PruneReport contains the response for Engine API: // POST "/containers/prune" type PruneReport struct { @@ -42,3 +56,133 @@ type StatsResponseReader struct { Body io.ReadCloser `json:"body"` OSType string `json:"ostype"` } + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + // Type is the type of mount, see `Type` definitions in + // github.com/docker/docker/api/types/mount.Type + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. + Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. + Destination string + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. + Propagation mount.Propagation +} + +// State stores container's running state +// it's part of ContainerJSONBase and returned by "inspect" command +type State struct { + Status ContainerState // String representation of the container state. 
Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// Summary contains response of Engine API: +// GET "/containers/json" +type Summary struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State ContainerState + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + Annotations map[string]string `json:",omitempty"` + } + NetworkSettings *NetworkSettingsSummary + Mounts []MountPoint +} + +// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json" +// for API version 1.18 and older. +// +// TODO(thaJeztah): combine ContainerJSONBase and InspectResponse into a single struct. +// The split between ContainerJSONBase (ContainerJSONBase) and InspectResponse (InspectResponse) +// was done in commit 6deaa58ba5f051039643cedceee97c8695e2af74 (https://github.com/moby/moby/pull/13675). +// ContainerJSONBase contained all fields for API < 1.19, and InspectResponse +// held fields that were added in API 1.19 and up. Given that the minimum +// supported API version is now 1.24, we no longer use the separate type. +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *State + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *HostConfig + GraphDriver storage.DriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// InspectResponse is the response for the GET "/containers/{name:.*}/json" +// endpoint. +type InspectResponse struct { + *ContainerJSONBase + Mounts []MountPoint + Config *Config + NetworkSettings *NetworkSettings + // ImageManifestDescriptor is the descriptor of a platform-specific manifest of the image used to create the container. + ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 63381da3..00000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. 
- // - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index c10f175e..00000000 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/disk_usage.go b/vendor/github.com/docker/docker/api/types/container/disk_usage.go new file mode 100644 index 00000000..05b6cbe9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/disk_usage.go @@ -0,0 +1,8 @@ +package container + +// DiskUsage contains disk usage for containers. +type DiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*Summary +} diff --git a/vendor/github.com/docker/docker/api/types/container/exec.go b/vendor/github.com/docker/docker/api/types/container/exec.go index 96093eb5..e455cd27 100644 --- a/vendor/github.com/docker/docker/api/types/container/exec.go +++ b/vendor/github.com/docker/docker/api/types/container/exec.go @@ -1,5 +1,13 @@ package container +import "github.com/docker/docker/api/types/common" + +// ExecCreateResponse is the response for a successful exec-create request. +// It holds the ID of the exec that was created. +// +// TODO(thaJeztah): make this a distinct type. +type ExecCreateResponse = common.IDResponse + // ExecOptions is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecOptions struct { @@ -10,11 +18,13 @@ type ExecOptions struct { AttachStdin bool // Attach the standard input, makes possible user interaction AttachStderr bool // Attach the standard error AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode DetachKeys string // Escape keys for detach Env []string // Environment variables WorkingDir string // Working directory Cmd []string // Execution commands and args + + // Deprecated: the Detach field is not used, and will be removed in a future release. + Detach bool } // ExecStartOptions is a temp struct used by execStart diff --git a/vendor/github.com/docker/docker/api/types/container/health.go b/vendor/github.com/docker/docker/api/types/container/health.go new file mode 100644 index 00000000..96e91cc8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/health.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + "time" +) + +// HealthStatus is a string representation of the container's health. +// +// It currently is an alias for string, but may become a distinct type in future. 
+type HealthStatus = string + +// Health states +const ( + NoHealthcheck HealthStatus = "none" // Indicates there is no healthcheck + Starting HealthStatus = "starting" // Starting indicates that the container is not yet ready + Healthy HealthStatus = "healthy" // Healthy indicates that the container is running correctly + Unhealthy HealthStatus = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status HealthStatus // Status is one of [Starting], [Healthy] or [Unhealthy]. + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +var validHealths = []string{ + NoHealthcheck, Starting, Healthy, Unhealthy, +} + +// ValidateHealthStatus checks if the provided string is a valid +// container [HealthStatus]. +func ValidateHealthStatus(s HealthStatus) error { + switch s { + case NoHealthcheck, Starting, Healthy, Unhealthy: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for health (%s): must be one of %s", s, strings.Join(validHealths, ", "))} + } +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index 03648fb7..f63f049c 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container import ( "errors" @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) // CgroupnsMode represents the cgroup namespace mode of the container @@ -145,7 +145,7 @@ func (n NetworkMode) IsDefault() bool { // IsPrivate indicates whether container uses its private network stack. func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) + return !n.IsHost() && !n.IsContainer() } // IsContainer indicates whether container uses a container network stack. @@ -230,7 +230,7 @@ type PidMode string // IsPrivate indicates whether the container uses its own new pid namespace. func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) + return !n.IsHost() && !n.IsContainer() } // IsHost indicates whether the container uses the host's pid namespace. 
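As an aside (not part of the upstream patch): the `IsPrivate` helpers above were rewritten from `!(n.IsHost() || n.IsContainer())` to the equivalent `!n.IsHost() && !n.IsContainer()` (De Morgan), a pure style change. A minimal sketch exercising those helpers together with the new `ValidateHealthStatus`, assuming the vendored `github.com/docker/docker/api/types/container` package at this version:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// IsPrivate is true whenever the mode is neither "host" nor "container:<id>",
	// which is exactly what the rewritten !IsHost() && !IsContainer() expresses.
	for _, m := range []container.NetworkMode{"bridge", "host", "container:abc123"} {
		fmt.Printf("%-18s private=%v\n", string(m), m.IsPrivate())
	}

	// ValidateHealthStatus accepts only the four defined health states.
	fmt.Println(container.ValidateHealthStatus(container.Healthy)) // <nil>
	fmt.Println(container.ValidateHealthStatus("flaky"))           // invalid value for health (flaky): ...
}
```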
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index cdee49ea..cd6a7a9b 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -1,6 +1,6 @@ //go:build !windows -package container // import "github.com/docker/docker/api/types/container" +package container import "github.com/docker/docker/api/types/network" diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index f0854554..db63e190 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container import "github.com/docker/docker/api/types/network" diff --git a/vendor/github.com/docker/docker/api/types/container/network_settings.go b/vendor/github.com/docker/docker/api/types/container/network_settings.go new file mode 100644 index 00000000..afec0e54 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/network_settings.go @@ -0,0 +1,56 @@ +package container + +import ( + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" +) + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds networking state for a container when inspecting it. +type NetworkSettingsBase struct { + Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + + // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + // + // Deprecated: This field is never set and will be removed in a future release. + HairpinMode bool + // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6Address string + // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6PrefixLen int + SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. + SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. 
+type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// NetworkSettingsSummary provides a summary of container's networks +// in /containers/json +type NetworkSettingsSummary struct { + Networks map[string]*network.EndpointSettings +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/container/port.go similarity index 96% rename from vendor/github.com/docker/docker/api/types/port.go rename to vendor/github.com/docker/docker/api/types/container/port.go index d9123474..895043cf 100644 --- a/vendor/github.com/docker/docker/api/types/port.go +++ b/vendor/github.com/docker/docker/api/types/container/port.go @@ -1,4 +1,4 @@ -package types +package container // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command diff --git a/vendor/github.com/docker/docker/api/types/container/state.go b/vendor/github.com/docker/docker/api/types/container/state.go new file mode 100644 index 00000000..78d5c4fe --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/state.go @@ -0,0 +1,64 @@ +package container + +import ( + "fmt" + "strings" +) + +// ContainerState is a string representation of the container's current state. +// +// It currently is an alias for string, but may become a distinct type in the future. +type ContainerState = string + +const ( + StateCreated ContainerState = "created" // StateCreated indicates the container is created, but not (yet) started. + StateRunning ContainerState = "running" // StateRunning indicates that the container is running. + StatePaused ContainerState = "paused" // StatePaused indicates that the container's current state is paused. + StateRestarting ContainerState = "restarting" // StateRestarting indicates that the container is currently restarting. + StateRemoving ContainerState = "removing" // StateRemoving indicates that the container is being removed. + StateExited ContainerState = "exited" // StateExited indicates that the container exited. + StateDead ContainerState = "dead" // StateDead indicates that the container failed to be deleted. Containers in this state are attempted to be cleaned up when the daemon restarts. +) + +var validStates = []ContainerState{ + StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead, +} + +// ValidateContainerState checks if the provided string is a valid +// container [ContainerState]. +func ValidateContainerState(s ContainerState) error { + switch s { + case StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for state (%s): must be one of %s", s, strings.Join(validStates, ", "))} + } +} + +// StateStatus is used to return container wait results. +// Implements exec.ExitCode interface. 
+// This type is needed as State include a sync.Mutex field which make +// copying it unsafe. +type StateStatus struct { + exitCode int + err error +} + +// ExitCode returns current exitcode for the state. +func (s StateStatus) ExitCode() int { + return s.exitCode +} + +// Err returns current error for the state. Returns nil if the container had +// exited on its own. +func (s StateStatus) Err() error { + return s.err +} + +// NewStateStatus returns a new StateStatus with the given exit code and error. +func NewStateStatus(exitCode int, err error) StateStatus { + return StateStatus{ + exitCode: exitCode, + err: err, + } +} diff --git a/vendor/github.com/docker/docker/api/types/container/stats.go b/vendor/github.com/docker/docker/api/types/container/stats.go index 3b3fb131..3bfeb484 100644 --- a/vendor/github.com/docker/docker/api/types/container/stats.go +++ b/vendor/github.com/docker/docker/api/types/container/stats.go @@ -148,7 +148,15 @@ type PidsStats struct { } // Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { +// +// Deprecated: use [StatsResponse] instead. This type will be removed in the next release. +type Stats = StatsResponse + +// StatsResponse aggregates all types of stats of one container. +type StatsResponse struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + // Common stats Read time.Time `json:"read"` PreRead time.Time `json:"preread"` @@ -162,20 +170,8 @@ type Stats struct { StorageStats StorageStats `json:"storage_stats,omitempty"` // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsResponse is newly used Networks. -// -// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801 -type StatsResponse struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + Networks map[string]NetworkStats `json:"networks,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/container/top_response.go b/vendor/github.com/docker/docker/api/types/container/top_response.go new file mode 100644 index 00000000..b4bae5ef --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/top_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// TopResponse ContainerTopResponse +// +// Container "top" response. +// swagger:model TopResponse +type TopResponse struct { + + // Each process running in the container, where each process + // is an array of values corresponding to the titles. 
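For context on the stats.go hunk above: the old Stats/StatsResponse split is folded into a single StatsResponse, with Name, ID and Networks promoted to top-level fields while the JSON wire shape stays the same. A minimal decoding sketch under that assumption; the payload is a trimmed, hypothetical /containers/{id}/stats body:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Name, ID and Networks now live directly on StatsResponse; code that
	// previously reached through the embedded Stats keeps decoding as-is.
	payload := []byte(`{"name":"/web","id":"abc123","pids_stats":{"current":7}}`)

	var st container.StatsResponse
	if err := json.Unmarshal(payload, &st); err != nil {
		panic(err)
	}
	fmt.Println(st.Name, st.ID, st.PidsStats.Current) // /web abc123 7
}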
+ Processes [][]string `json:"Processes"` + + // The ps column titles + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/update_response.go b/vendor/github.com/docker/docker/api/types/container/update_response.go new file mode 100644 index 00000000..e2b5bf5a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/update_response.go @@ -0,0 +1,14 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// UpdateResponse ContainerUpdateResponse +// +// Response for a successful container-update. +// swagger:model UpdateResponse +type UpdateResponse struct { + + // Warnings encountered when updating the container. + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go index cd8311f9..64820fe3 100644 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // WaitCondition is a type used to specify a container state for which // to wait. diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index e225df4e..952c0ff2 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,4 +1,5 @@ -package events // import "github.com/docker/docker/api/types/events" +package events + import "github.com/docker/docker/api/types/filters" // Type is used for event-types. @@ -111,11 +112,14 @@ type Actor struct { // Message represents the information an event contains type Message struct { - // Deprecated information from JSONMessage. + // Deprecated: use Action instead. + // Information from JSONMessage. // With data only in container events. - Status string `json:"status,omitempty"` // Deprecated: use Action instead. - ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. - From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. + Status string `json:"status,omitempty"` + // Deprecated: use Actor.ID instead. + ID string `json:"id,omitempty"` + // Deprecated: use Actor.Attributes["image"] instead. + From string `json:"from,omitempty"` Type Type Action Action diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go index f52f6944..b8a690d6 100644 --- a/vendor/github.com/docker/docker/api/types/filters/errors.go +++ b/vendor/github.com/docker/docker/api/types/filters/errors.go @@ -22,16 +22,3 @@ func (e invalidFilter) Error() string { // InvalidParameter marks this error as ErrInvalidParameter func (e invalidFilter) InvalidParameter() {} - -// unreachableCode is an error indicating that the code path was not expected to be reached. 
-type unreachableCode struct { - Filter string - Value []string -} - -// System marks this error as ErrSystem -func (e unreachableCode) System() {} - -func (e unreachableCode) Error() string { - return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value) -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 0914b2a4..86f4bdb2 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -2,7 +2,7 @@ Package filters provides tools for encoding a mapping of keys to a set of multiple values. */ -package filters // import "github.com/docker/docker/api/types/filters" +package filters import ( "encoding/json" @@ -200,7 +200,6 @@ func (args Args) Match(field, source string) bool { // Error is not nil only if the filter values are not valid boolean or are conflicting. func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { fieldValues, ok := args.fields[key] - if !ok { return defaultValue, nil } @@ -211,20 +210,11 @@ func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { isFalse := fieldValues["0"] || fieldValues["false"] isTrue := fieldValues["1"] || fieldValues["true"] - - conflicting := isFalse && isTrue - invalid := !isFalse && !isTrue - - if conflicting || invalid { + if isFalse == isTrue { + // Either no or conflicting truthy/falsy value were provided return defaultValue, &invalidFilter{key, args.Get(key)} - } else if isFalse { - return false, nil - } else if isTrue { - return true, nil } - - // This code shouldn't be reached. - return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)} + return isTrue, nil } // ExactMatch returns true if the source matches exactly one of the values. diff --git a/vendor/github.com/docker/docker/api/types/image/disk_usage.go b/vendor/github.com/docker/docker/api/types/image/disk_usage.go new file mode 100644 index 00000000..b29d925c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/disk_usage.go @@ -0,0 +1,8 @@ +package image + +// DiskUsage contains disk usage for images. +type DiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*Summary +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go index e302bb0a..a6cdab84 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -1,4 +1,4 @@ -package image // import "github.com/docker/docker/api/types/image" +package image // ---------------------------------------------------------------------------- // Code generated by `swagger generate operation`. DO NOT EDIT. diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/docker/docker/api/types/image/image_inspect.go new file mode 100644 index 00000000..3bdb4742 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_inspect.go @@ -0,0 +1,142 @@ +package image + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/storage" + dockerspec "github.com/moby/docker-image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// RootFS returns Image's RootFS description including the layer IDs. 
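Worth noting about the GetBoolOrDefault rewrite above: collapsing the truthy/falsy bookkeeping into a single isFalse == isTrue test also removes the unreachable fallthrough, and with it the unreachableCode error type. A small sketch of the resulting behavior, assuming the vendored filters package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("dangling", "true")

	// "1"/"true" count as truthy, "0"/"false" as falsy; providing neither
	// or both now simply yields the default plus an invalid-filter error.
	v, err := args.GetBoolOrDefault("dangling", false)
	fmt.Println(v, err) // true <nil>

	args.Add("dangling", "false") // conflicting: both truthy and falsy set
	v, err = args.GetBoolOrDefault("dangling", false)
	fmt.Println(v, err != nil) // false true
}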
+type RootFS struct { + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` +} + +// InspectResponse contains response of Engine API: +// GET "/images/{name:.*}/json" +type InspectResponse struct { + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Parent is the ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + // + // This information is only available if present in the image, + // and omitted otherwise. + Created string `json:",omitempty"` + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. + Container string `json:",omitempty"` + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. + ContainerConfig *container.Config `json:",omitempty"` + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *dockerspec.DockerOCIImageConfig + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. + Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). 
+ OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + VirtualSize int64 `json:"VirtualSize,omitempty"` + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver storage.DriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata Metadata + + // Descriptor is the OCI descriptor of the image target. + // It's only set if the daemon provides a multi-platform image store. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"` + + // Manifests is a list of image manifests available in this image. It + // provides a more detailed view of the platform-specific image manifests or + // other image-attached data like build attestations. + // + // Only available if the daemon provides a multi-platform image store, the client + // requests manifests AND does not request a specific platform. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Manifests []ManifestSummary `json:"Manifests,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 923ebe5a..9e33a42f 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -38,7 +38,7 @@ type PullOptions struct { // authentication header value in base64 encoded format, or an error if the // privilege request fails. // - // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. PrivilegeFunc func(context.Context) (string, error) Platform string } @@ -53,7 +53,7 @@ type PushOptions struct { // authentication header value in base64 encoded format, or an error if the // privilege request fails. // - // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. PrivilegeFunc func(context.Context) (string, error) // Platform is an optional field that selects a specific platform to push @@ -75,6 +75,8 @@ type ListOptions struct { SharedSize bool // ContainerCount indicates whether container count should be computed. + // + // Deprecated: This field has been unused and is no longer required and will be removed in a future version. ContainerCount bool // Manifests indicates whether the image manifests should be returned. @@ -83,6 +85,40 @@ type ListOptions struct { // RemoveOptions holds parameters to remove images. type RemoveOptions struct { + Platforms []ocispec.Platform Force bool PruneChildren bool } + +// HistoryOptions holds parameters to get image history. +type HistoryOptions struct { + // Platform from the manifest list to use for history. + Platform *ocispec.Platform +} + +// LoadOptions holds parameters to load images. 
+type LoadOptions struct { + // Quiet suppresses progress output + Quiet bool + + // Platforms selects the platforms to load if the image is a + // multi-platform image and has multiple variants. + Platforms []ocispec.Platform +} + +type InspectOptions struct { + // Manifests returns the image manifests. + Manifests bool + + // Platform selects the specific platform of a multi-platform image to inspect. + // + // This option is only available for API version 1.49 and up. + Platform *ocispec.Platform +} + +// SaveOptions holds parameters to save images. +type SaveOptions struct { + // Platforms selects the platforms to save if the image is a + // multi-platform image and has multiple variants. + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go index e87e216a..c5ae6ab9 100644 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,5 +1,7 @@ package image +import ocispec "github.com/opencontainers/image-spec/specs-go/v1" + type Summary struct { // Number of containers using this image. Includes both stopped and running @@ -42,6 +44,13 @@ type Summary struct { // Required: true ParentID string `json:"ParentId"` + // Descriptor is the OCI descriptor of the image target. + // It's only set if the daemon provides a multi-platform image store. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"` + // Manifests is a list of image manifests available in this image. It // provides a more detailed view of the platform-specific image manifests or // other image-attached data like build attestations. diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index c68dcf65..090d436c 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/api/types/mount" +package mount import ( "os" @@ -19,6 +19,8 @@ const ( TypeNamedPipe Type = "npipe" // TypeCluster is the type for Swarm Cluster Volumes. TypeCluster Type = "cluster" + // TypeImage is the type for mounting another image's filesystem + TypeImage Type = "image" ) // Mount represents a mount (volume). @@ -34,6 +36,7 @@ type Mount struct { BindOptions *BindOptions `json:",omitempty"` VolumeOptions *VolumeOptions `json:",omitempty"` + ImageOptions *ImageOptions `json:",omitempty"` TmpfsOptions *TmpfsOptions `json:",omitempty"` ClusterOptions *ClusterOptions `json:",omitempty"` } @@ -100,6 +103,10 @@ type VolumeOptions struct { DriverConfig *Driver `json:",omitempty"` } +type ImageOptions struct { + Subpath string `json:",omitempty"` +} + // Driver represents a volume driver. type Driver struct { Name string `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go index 0fbb40b3..167ac70a 100644 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -19,6 +19,12 @@ type EndpointSettings struct { // generated address). 
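To illustrate the mount.go change above: TypeImage and ImageOptions allow mounting another image's filesystem into a container. A minimal construction sketch; the image reference and target path are made up, and whether a given daemon honors such a mount depends on its API version:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// TypeImage mounts the named image's filesystem at Target;
	// ImageOptions.Subpath narrows the mount to a path inside the image.
	m := mount.Mount{
		Type:         mount.TypeImage,
		Source:       "alpine:3.21", // hypothetical image reference
		Target:       "/mnt/alpine",
		ReadOnly:     true,
		ImageOptions: &mount.ImageOptions{Subpath: "etc"},
	}
	fmt.Printf("%+v\n", m)
}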
MacAddress string DriverOpts map[string]string + + // GwPriority determines which endpoint will provide the default gateway + // for the container. The endpoint with the highest priority will be used. + // If multiple endpoints have the same priority, they are lexicographically + // sorted based on their network name, and the one that sorts first is picked. + GwPriority int // Operational data NetworkID string EndpointID string diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index c8db97a7..4a0cb479 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,4 +1,4 @@ -package network // import "github.com/docker/docker/api/types/network" +package network import ( "time" @@ -33,6 +33,7 @@ type CreateRequest struct { type CreateOptions struct { Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv4 *bool `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4. EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. IPAM *IPAM // IPAM is the network's IP Address Management. Internal bool // Internal represents if the network is used internal only. @@ -76,7 +77,8 @@ type Inspect struct { Created time.Time // Created is the time the network created Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + EnableIPv4 bool // EnableIPv4 represents whether IPv4 is enabled + EnableIPv6 bool // EnableIPv6 represents whether IPv6 is enabled IPAM IPAM // IPAM is the network's IP Address Management Internal bool // Internal represents if the network is used internal only Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go index 60d1fb5a..18f743fc 100644 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -1,4 +1,4 @@ -package types // import "github.com/docker/docker/api/types" +package types import ( "encoding/json" diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go index 8e383f6e..70f73200 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -1,17 +1,30 @@ -package registry // import "github.com/docker/docker/api/types/registry" +package registry + import ( + "context" "encoding/base64" "encoding/json" + "fmt" "io" "strings" - - "github.com/pkg/errors" ) // AuthHeader is the name of the header used to send encoded registry // authorization credentials for registry operations (push/pull). const AuthHeader = "X-Registry-Auth" +// RequestAuthConfig is a function interface that clients can supply +// to retry operations after getting an authorization error. 
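A quick sketch of the two network additions above, GwPriority on EndpointSettings and EnableIPv4 on CreateOptions; the values are illustrative only:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	// GwPriority picks which endpoint provides the default gateway when a
	// container is attached to several networks (highest value wins).
	ep := &network.EndpointSettings{GwPriority: 100}

	// CreateOptions now carries EnableIPv4 alongside EnableIPv6.
	enableIPv4 := true
	opts := network.CreateOptions{EnableIPv4: &enableIPv4}

	fmt.Println(ep.GwPriority, *opts.EnableIPv4)
}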
+// +// The function must return the [AuthHeader] value ([AuthConfig]), encoded +// in base64url format ([RFC4648, section 5]), which can be decoded by +// [DecodeAuthConfig]. +// +// It must return an error if the privilege request fails. +// +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 +type RequestAuthConfig func(context.Context) (string, error) + // AuthConfig contains authorization information for connecting to a Registry. type AuthConfig struct { Username string `json:"username,omitempty"` @@ -85,7 +98,7 @@ func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { } func invalid(err error) error { - return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")} + return errInvalidParameter{fmt.Errorf("invalid X-Registry-Auth header: %w", err)} } type errInvalidParameter struct{ error } diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go index f0a2113e..42cac443 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -1,4 +1,4 @@ -package registry // import "github.com/docker/docker/api/types/registry" +package registry // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 75ee07b1..9319c964 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -1,4 +1,7 @@ -package registry // import "github.com/docker/docker/api/types/registry" +// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: +//go:build go1.23 + +package registry import ( "encoding/json" @@ -9,11 +12,32 @@ import ( // ServiceConfig stores daemon registry services configuration. type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string + AllowNondistributableArtifactsCIDRs []*NetIPNet `json:"AllowNondistributableArtifactsCIDRs,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. + AllowNondistributableArtifactsHostnames []string `json:"AllowNondistributableArtifactsHostnames,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. + + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string + + // ExtraFields is for internal use to include deprecated fields on older API versions. + ExtraFields map[string]any `json:"-"` +} + +// MarshalJSON implements a custom marshaler to include legacy fields +// in API responses. 
+func (sc *ServiceConfig) MarshalJSON() ([]byte, error) { + type tmp ServiceConfig + base, err := json.Marshal((*tmp)(sc)) + if err != nil { + return nil, err + } + var merged map[string]any + _ = json.Unmarshal(base, &merged) + + for k, v := range sc.ExtraFields { + merged[k] = v + } + return json.Marshal(merged) } // NetIPNet is the net.IPNet type, which can be marshalled and @@ -31,15 +55,17 @@ func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { } // UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error { var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } + if err := json.Unmarshal(b, &ipnetStr); err != nil { + return err + } + _, cidr, err := net.ParseCIDR(ipnetStr) + if err != nil { + return err } - return + *ipnet = NetIPNet(*cidr) + return nil } // IndexInfo contains information about a registry diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/docker/docker/api/types/registry/search.go index a0a1eec5..994ca4c6 100644 --- a/vendor/github.com/docker/docker/api/types/registry/search.go +++ b/vendor/github.com/docker/docker/api/types/registry/search.go @@ -10,11 +10,12 @@ import ( type SearchOptions struct { RegistryAuth string - // PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can - // supply to retry operations after getting an authorization error. + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. // - // It must return the registry authentication header value in base64 - // format, or an error if the privilege request fails. + // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. PrivilegeFunc func(context.Context) (string, error) Filters filters.Args Limit int diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/storage/driver_data.go similarity index 75% rename from vendor/github.com/docker/docker/api/types/graph_driver_data.go rename to vendor/github.com/docker/docker/api/types/storage/driver_data.go index ce3deb33..009e2130 100644 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ b/vendor/github.com/docker/docker/api/types/storage/driver_data.go @@ -1,13 +1,13 @@ -package types +package storage // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// GraphDriverData Information about the storage driver used to store the container's and +// DriverData Information about the storage driver used to store the container's and // image's filesystem. // -// swagger:model GraphDriverData -type GraphDriverData struct { +// swagger:model DriverData +type DriverData struct { // Low-level storage metadata, provided as key/value pairs. 
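For context on the custom marshaler above: it round-trips ServiceConfig through a map so that anything placed in ExtraFields is spliced into the emitted JSON object. A sketch of the mechanism; the "LegacyField" key is purely hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	sc := &registry.ServiceConfig{
		Mirrors: []string{"https://mirror.example.com"}, // hypothetical mirror
		// ExtraFields lets the daemon emit deprecated keys for older API
		// versions without keeping dedicated struct fields around.
		ExtraFields: map[string]any{"LegacyField": true},
	}
	out, err := json.Marshal(sc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // ...,"LegacyField":true,...
}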
// diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go index 82921ceb..bad493fb 100644 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -1,4 +1,4 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" +package strslice import "encoding/json" diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index 5ded7dba..b42812e0 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -1,4 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "strconv" diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go index 16202ccc..80a6ffdb 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -1,6 +1,10 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm -import "os" +import ( + "os" + + "github.com/docker/docker/api/types/filters" +) // Config represents a config. type Config struct { @@ -12,6 +16,12 @@ type Config struct { // ConfigSpec represents a config specification from a config in swarm type ConfigSpec struct { Annotations + + // Data is the data to store as a config. + // + // The maximum allowed size is 1000KB, as defined in [MaxConfigSize]. + // + // [MaxConfigSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize Data []byte `json:",omitempty"` // Templating controls whether and how to evaluate the config payload as @@ -38,3 +48,15 @@ type ConfigReference struct { ConfigID string ConfigName string } + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. 
+ ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 30e3de70..f9416bac 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -1,4 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "time" diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go index 98ef3284..4b880723 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -1,4 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "github.com/docker/docker/api/types/network" diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index bb98d5ee..2018a031 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -1,4 +1,6 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm + +import "github.com/docker/docker/api/types/filters" // Node represents a node. type Node struct { @@ -137,3 +139,13 @@ const ( type Topology struct { Segments map[string]string `json:",omitempty"` } + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go index 0c77403c..8a28320f 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -1,4 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm // RuntimeType is the type of runtime used for the TaskSpec type RuntimeType string diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go index 292bd7af..90e572cf 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -1,3 +1,3 @@ //go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" +package runtime diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go index d5213ec9..d9482ab5 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -1,6 +1,10 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm -import "os" +import ( + "os" + + "github.com/docker/docker/api/types/filters" +) // Secret represents a secret. 
type Secret struct { @@ -12,8 +16,22 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Data is the data to store as a secret. It must be empty if a + // [Driver] is used, in which case the data is loaded from an external + // secret store. The maximum allowed size is 500KB, as defined in + // [MaxSecretSize]. + // + // This field is only used to create the secret, and is not returned + // by other endpoints. + // + // [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize + Data []byte `json:",omitempty"` + + // Driver is the name of the secrets driver used to fetch the secret's + // value from an external secret store. If not set, the default built-in + // store is used. + Driver *Driver `json:",omitempty"` // Templating controls whether and how to evaluate the secret payload as // a template. If it is not set, no templating is used. @@ -34,3 +52,15 @@ type SecretReference struct { SecretID string SecretName string } + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go index 5b6d5ec1..56c660c1 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -1,6 +1,10 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm -import "time" +import ( + "time" + + "github.com/docker/docker/api/types/filters" +) // Service represents a service. type Service struct { @@ -200,3 +204,69 @@ type JobStatus struct { // Swarm manager. LastExecution time.Time `json:",omitempty"` } + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. 
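As a reading aid for the SecretSpec documentation added below: Data is create-only, must be empty when an external Driver is used, and is capped at swarmkit's MaxSecretSize (500KB). A minimal sketch; the secret name and payload are placeholders:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "db-password"}, // placeholder name
		// Data is only consumed at creation time and is not returned by
		// other endpoints; it stays under the 500KB MaxSecretSize limit.
		Data: []byte("s3cret"),
	}
	fmt.Println(spec.Name, len(spec.Data))
}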
+ + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 1b4be6ff..38f3e666 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -1,4 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "time" @@ -235,3 +235,10 @@ type UpdateFlags struct { RotateManagerToken bool RotateManagerUnlockKey bool } + +// UnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type UnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index ad3eeca0..4dc95e8b 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -1,8 +1,9 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "time" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm/runtime" ) @@ -223,3 +224,8 @@ type VolumeAttachment struct { // in the ContainerSpec, that this volume fulfills. Target string `json:",omitempty"` } + +// TaskListOptions holds parameters to list tasks with. 
+type TaskListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/system/disk_usage.go b/vendor/github.com/docker/docker/api/types/system/disk_usage.go new file mode 100644 index 00000000..99078cf1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/disk_usage.go @@ -0,0 +1,17 @@ +package system + +import ( + "github.com/docker/docker/api/types/build" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/volume" +) + +// DiskUsage contains response of Engine API for API 1.49 and greater: +// GET "/system/df" +type DiskUsage struct { + Images *image.DiskUsage + Containers *container.DiskUsage + Volumes *volume.DiskUsage + BuildCache *build.CacheDiskUsage +} diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index c66a2afb..047639ed 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -29,8 +29,6 @@ type Info struct { CPUSet bool PidsLimit bool IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` Debug bool NFd int OomKillDisable bool @@ -73,7 +71,9 @@ type Info struct { SecurityOptions []string ProductLicense string `json:",omitempty"` DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"` CDISpecDirs []string + DiscoveredDevices []DeviceInfo `json:",omitempty"` Containerd *ContainerdInfo `json:",omitempty"` @@ -137,8 +137,13 @@ type PluginsInfo struct { // Commit holds the Git-commit (SHA1) that a binary was built from, as reported // in the version-string of external tools, such as containerd, or runC. type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. + // ID is the actual commit ID or version of external tool. + ID string + + // Expected is the commit ID of external tool expected by dockerd as set at build time. + // + // Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions. + Expected string `json:",omitempty"` } // NetworkAddressPool is a temp struct used by [Info] struct. @@ -146,3 +151,20 @@ type NetworkAddressPool struct { Base string Size int } + +// FirewallInfo describes the firewall backend. +type FirewallInfo struct { + // Driver is the name of the firewall backend driver. + Driver string `json:"Driver"` + // Info is a list of label/value pairs, containing information related to the firewall. + Info [][2]string `json:"Info,omitempty"` +} + +// DeviceInfo represents a discoverable device from a device driver. +type DeviceInfo struct { + // Source indicates the origin device driver. + Source string `json:"Source"` + // ID is the unique identifier for the device. 
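To illustrate the new system/disk_usage.go aggregate above (API 1.49 and greater): each section is a nil-able pointer, so consumers should guard every dereference. A sketch that sums the Reclaimable fields of the two sections whose shapes appear in this patch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/system"
)

// reclaimable sums the reclaimable bytes of the image and container
// sections; either pointer may be nil when the daemon omits a section.
func reclaimable(du system.DiskUsage) int64 {
	var total int64
	if du.Images != nil {
		total += du.Images.Reclaimable
	}
	if du.Containers != nil {
		total += du.Containers.Reclaimable
	}
	return total
}

func main() {
	du := system.DiskUsage{
		Images:     &image.DiskUsage{Reclaimable: 1 << 20},
		Containers: &container.DiskUsage{Reclaimable: 512},
	}
	fmt.Println(reclaimable(du)) // 1049088
}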
+ // Example: CDI FQDN like "vendor.com/gpu=0", or other driver-specific device ID + ID string `json:"ID"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index cab5c32e..0e1df38a 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -1,4 +1,4 @@ -package time // import "github.com/docker/docker/api/types/time" +package time import ( "fmt" @@ -30,7 +30,7 @@ func GetTimestamp(value string, reference time.Time) (string, error) { var format string // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + parseInLocation := !strings.ContainsAny(value, "zZ+") && strings.Count(value, "-") != 3 if strings.Contains(value, ".") { if parseInLocation { @@ -105,23 +105,23 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // since := time.Unix(seconds, nanoseconds) // // returns seconds as defaultSeconds if value == "" -func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) { +func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, _ error) { if value == "" { return defaultSeconds, 0, nil } return parseTimestamp(value) } -func parseTimestamp(value string) (sec int64, nsec int64, err error) { +func parseTimestamp(value string) (seconds int64, nanoseconds int64, _ error) { s, n, ok := strings.Cut(value, ".") - sec, err = strconv.ParseInt(s, 10, 64) + sec, err := strconv.ParseInt(s, 10, 64) if err != nil { return sec, 0, err } if !ok { return sec, 0, nil } - nsec, err = strconv.ParseInt(n, 10, 64) + nsec, err := strconv.ParseInt(n, 10, 64) if err != nil { return sec, nsec, err } diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index fe99b743..8bbadeb2 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -1,16 +1,11 @@ -package types // import "github.com/docker/docker/api/types" +package types import ( - "time" - + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" - "github.com/docker/go-connections/nat" ) const ( @@ -21,152 +16,13 @@ const ( MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" ) -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string `json:",omitempty"` - Layers []string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. 
- ID string `json:"Id"` - - // RepoTags is a list of image names/tags in the local image cache that - // reference this image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - RepoTags []string - - // RepoDigests is a list of content-addressable digests of locally available - // image manifests that the image is referenced from. Multiple manifests can - // refer to the same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - RepoDigests []string - - // Parent is the ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - Parent string - - // Comment is an optional message that can be set when committing or - // importing the image. - Comment string - - // Created is the date and time at which the image was created, formatted in - // RFC 3339 nano-seconds (time.RFC3339Nano). - // - // This information is only available if present in the image, - // and omitted otherwise. - Created string `json:",omitempty"` - - // Container is the ID of the container that was used to create the image. - // - // Depending on how the image was created, this field may be empty. - // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - Container string `json:",omitempty"` - - // ContainerConfig is an optional field containing the configuration of the - // container that was last committed when creating the image. - // - // Previous versions of Docker builder used this field to store build cache, - // and it is not in active use anymore. - // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - ContainerConfig *container.Config `json:",omitempty"` - - // DockerVersion is the version of Docker that was used to build the image. - // - // Depending on how the image was created, this field may be empty. - DockerVersion string - - // Author is the name of the author that was specified when committing the - // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string - Config *container.Config - - // Architecture is the hardware CPU architecture that the image runs on. - Architecture string - - // Variant is the CPU architecture variant (presently ARM-only). - Variant string `json:",omitempty"` - - // OS is the Operating System the image is built to run on. - Os string - - // OsVersion is the version of the Operating System the image is built to - // run on (especially for Windows). - OsVersion string `json:",omitempty"` - - // Size is the total size of the image including all layers it is composed of. - Size int64 - - // VirtualSize is the total size of the image including all layers it is - // composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - VirtualSize int64 `json:"VirtualSize,omitempty"` - - // GraphDriver holds information about the storage driver used to store the - // container's and image's filesystem. 
- GraphDriver GraphDriverData - - // RootFS contains information about the image's RootFS, including the - // layer IDs. - RootFS RootFS - - // Metadata of the image in the local cache. - // - // This information is local to the daemon, and not part of the image itself. - Metadata image.Metadata -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - Annotations map[string]string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - // Ping contains response of Engine API: // GET "/_ping" type Ping struct { APIVersion string OSType string Experimental bool - BuilderVersion BuilderVersion + BuilderVersion build.BuilderVersion // SwarmStatus provides information about the current swarm status of the // engine, obtained from the "Swarm" header in the API response. @@ -205,176 +61,6 @@ type Version struct { BuildTime string `json:",omitempty"` } -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It sill be removed in the next release. 
- Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds networking state for a container when inspecting it. -type NetworkSettingsBase struct { - Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. - SandboxID string // SandboxID uniquely represents a container's network stack - SandboxKey string // SandboxKey identifies the sandbox - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - - // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - // - // Deprecated: This field is never set and will be removed in a future release. - HairpinMode bool - // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6Address string - // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6PrefixLen int - SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. - SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. -type MountPoint struct { - // Type is the type of mount, see `Type` definitions in - // github.com/docker/docker/api/types/mount.Type - Type mount.Type `json:",omitempty"` - - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name. - Name string `json:",omitempty"` - - // Source is the source location of the mount. 
- // - // For volumes, this contains the storage location of the volume (within - // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - // the source (host) part of the bind-mount. For `tmpfs` mount points, this - // field is empty. - Source string - - // Destination is the path relative to the container root (`/`) where the - // Source is mounted inside the container. - Destination string - - // Driver is the volume driver used to create the volume (if it is a volume). - Driver string `json:",omitempty"` - - // Mode is a comma separated list of options supplied by the user when - // creating the bind/volume mount. - // - // The default is platform-specific (`"z"` on Linux, empty on Windows). - Mode string - - // RW indicates whether the mount is mounted writable (read-write). - RW bool - - // Propagation describes how mounts are propagated from the host into the - // mount point, and vice-versa. Refer to the Linux kernel documentation - // for details: - // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // - // This field is not used on Windows. - Propagation mount.Propagation -} - // DiskUsageObject represents an object type used for disk usage query filtering. type DiskUsageObject string @@ -401,43 +87,12 @@ type DiskUsageOptions struct { type DiskUsage struct { LayersSize int64 Images []*image.Summary - Containers []*Container + Containers []*container.Summary Volumes []*volume.Volume - BuildCache []*BuildCache + BuildCache []*build.CacheRecord BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. } -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - // PushResult contains the tag, manifest digest, and manifest size from the // push. It's used to signal this information to the trust code in the client // so it can sign the manifest if necessary. @@ -446,42 +101,3 @@ type PushResult struct { Digest string Size int } - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} - -// BuildCache contains information about a build cache record. -type BuildCache struct { - // ID is the unique ID of the build cache record. - ID string - // Parent is the ID of the parent build cache record. - // - // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. - Parent string `json:"Parent,omitempty"` - // Parents is the list of parent build cache record IDs. - Parents []string `json:" Parents,omitempty"` - // Type is the cache record type. - Type string - // Description is a description of the build-step that produced the build cache. - Description string - // InUse indicates if the build cache is in use. 
- InUse bool - // Shared indicates if the build cache is shared. - Shared bool - // Size is the amount of disk space used by the build cache (in bytes). - Size int64 - // CreatedAt is the date and time at which the build cache was created. - CreatedAt time.Time - // LastUsedAt is the date and time at which the build cache was last used. - LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index 43ffe104..8456a456 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -1,210 +1,241 @@ package types import ( + "context" + + "github.com/docker/docker/api/types/build" + "github.com/docker/docker/api/types/common" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/storage" + "github.com/docker/docker/api/types/swarm" ) -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" +// IDResponse Response to an API call that returns just an Id. // -// Deprecated: use [image.PruneReport]. -type ImagesPruneReport = image.PruneReport +// Deprecated: use either [container.CommitResponse] or [container.ExecCreateResponse]. It will be removed in the next release. +type IDResponse = common.IDResponse -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune". +// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json" +// for API version 1.18 and older. // -// Deprecated: use [volume.PruneReport]. -type VolumesPruneReport = volume.PruneReport +// Deprecated: use [container.InspectResponse] or [container.ContainerJSONBase]. It will be removed in the next release. +type ContainerJSONBase = container.ContainerJSONBase -// NetworkCreateRequest is the request message sent to the server for network create call. +// ContainerJSON is the response for the GET "/containers/{name:.*}/json" +// endpoint. // -// Deprecated: use [network.CreateRequest]. -type NetworkCreateRequest = network.CreateRequest +// Deprecated: use [container.InspectResponse]. It will be removed in the next release. +type ContainerJSON = container.InspectResponse -// NetworkCreate is the expected body of the "create network" http request message +// Container contains response of Engine API: +// GET "/containers/json" // -// Deprecated: use [network.CreateOptions]. -type NetworkCreate = network.CreateOptions +// Deprecated: use [container.Summary]. +type Container = container.Summary -// NetworkListOptions holds parameters to filter the list of networks with. +// ContainerState stores container's running state // -// Deprecated: use [network.ListOptions]. -type NetworkListOptions = network.ListOptions +// Deprecated: use [container.State]. +type ContainerState = container.State -// NetworkCreateResponse is the response message sent by the server for network create call. +// NetworkSettings exposes the network settings in the api. // -// Deprecated: use [network.CreateResponse]. 
-type NetworkCreateResponse = network.CreateResponse +// Deprecated: use [container.NetworkSettings]. +type NetworkSettings = container.NetworkSettings -// NetworkInspectOptions holds parameters to inspect network. +// NetworkSettingsBase holds networking state for a container when inspecting it. // -// Deprecated: use [network.InspectOptions]. -type NetworkInspectOptions = network.InspectOptions +// Deprecated: use [container.NetworkSettingsBase]. +type NetworkSettingsBase = container.NetworkSettingsBase -// NetworkConnect represents the data to be used to connect a container to the network +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. // -// Deprecated: use [network.ConnectOptions]. -type NetworkConnect = network.ConnectOptions +// Deprecated: use [container.DefaultNetworkSettings]. +type DefaultNetworkSettings = container.DefaultNetworkSettings -// NetworkDisconnect represents the data to be used to disconnect a container from the network +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json. // -// Deprecated: use [network.DisconnectOptions]. -type NetworkDisconnect = network.DisconnectOptions +// Deprecated: use [container.NetworkSettingsSummary]. +type SummaryNetworkSettings = container.NetworkSettingsSummary + +// Health states +const ( + NoHealthcheck = container.NoHealthcheck // Deprecated: use [container.NoHealthcheck]. + Starting = container.Starting // Deprecated: use [container.Starting]. + Healthy = container.Healthy // Deprecated: use [container.Healthy]. + Unhealthy = container.Unhealthy // Deprecated: use [container.Unhealthy]. +) -// EndpointResource contains network resources allocated and used for a container in a network. +// Health stores information about the container's healthcheck results. // -// Deprecated: use [network.EndpointResource]. -type EndpointResource = network.EndpointResource +// Deprecated: use [container.Health]. +type Health = container.Health -// NetworkResource is the body of the "get network" http response message/ +// HealthcheckResult stores information about a single run of a healthcheck probe. // -// Deprecated: use [network.Inspect] or [network.Summary] (for list operations). -type NetworkResource = network.Inspect +// Deprecated: use [container.HealthcheckResult]. +type HealthcheckResult = container.HealthcheckResult -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. // -// Deprecated: use [network.PruneReport]. -type NetworksPruneReport = network.PruneReport +// Deprecated: use [container.MountPoint]. +type MountPoint = container.MountPoint -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. +// Port An open port on a container // -// Deprecated: use [container.ExecOptions]. -type ExecConfig = container.ExecOptions +// Deprecated: use [container.Port]. +type Port = container.Port -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package +// GraphDriverData Information about the storage driver used to store the container's and +// image's filesystem. // -// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions]. 
-type ExecStartCheck = container.ExecStartOptions +// Deprecated: use [storage.DriverData]. +type GraphDriverData = storage.DriverData -// ContainerExecInspect holds information returned by exec inspect. +// RootFS returns Image's RootFS description including the layer IDs. // -// Deprecated: use [container.ExecInspect]. -type ContainerExecInspect = container.ExecInspect +// Deprecated: use [image.RootFS]. +type RootFS = image.RootFS -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" // -// Deprecated: use [container.PruneReport]. -type ContainersPruneReport = container.PruneReport +// Deprecated: use [image.InspectResponse]. +type ImageInspect = image.InspectResponse -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. +// RequestPrivilegeFunc is a function interface that clients can supply to +// retry operations after getting an authorization error. +// This function returns the registry authentication header value in base64 +// format, or an error if the privilege request fails. // -// Deprecated: use [container.PathStat]. -type ContainerPathStat = container.PathStat +// Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. +type RequestPrivilegeFunc func(context.Context) (string, error) -// CopyToContainerOptions holds information -// about files to copy into a container. +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. // -// Deprecated: use [container.CopyToContainerOptions], -type CopyToContainerOptions = container.CopyToContainerOptions +// Deprecated: use [swarm.SecretCreateResponse]. +type SecretCreateResponse = swarm.SecretCreateResponse -// ContainerStats contains response of Engine API: -// GET "/stats" +// SecretListOptions holds parameters to list secrets // -// Deprecated: use [container.StatsResponseReader]. -type ContainerStats = container.StatsResponseReader +// Deprecated: use [swarm.SecretListOptions]. +type SecretListOptions = swarm.SecretListOptions -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. // -// Deprecated: use [container.ThrottlingData]. -type ThrottlingData = container.ThrottlingData +// Deprecated: use [swarm.ConfigCreateResponse]. +type ConfigCreateResponse = swarm.ConfigCreateResponse -// CPUUsage stores All CPU stats aggregated since container inception. +// ConfigListOptions holds parameters to list configs // -// Deprecated: use [container.CPUUsage]. -type CPUUsage = container.CPUUsage +// Deprecated: use [swarm.ConfigListOptions]. +type ConfigListOptions = swarm.ConfigListOptions -// CPUStats aggregates and wraps all CPU related info of container +// NodeListOptions holds parameters to list nodes with. // -// Deprecated: use [container.CPUStats]. -type CPUStats = container.CPUStats +// Deprecated: use [swarm.NodeListOptions]. +type NodeListOptions = swarm.NodeListOptions -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. +// NodeRemoveOptions holds parameters to remove nodes with. // -// Deprecated: use [container.MemoryStats]. 
-type MemoryStats = container.MemoryStats +// Deprecated: use [swarm.NodeRemoveOptions]. +type NodeRemoveOptions = swarm.NodeRemoveOptions -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. +// TaskListOptions holds parameters to list tasks with. // -// Deprecated: use [container.BlkioStatEntry]. -type BlkioStatEntry = container.BlkioStatEntry +// Deprecated: use [swarm.TaskListOptions]. +type TaskListOptions = swarm.TaskListOptions -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. +// ServiceCreateOptions contains the options to use when creating a service. // -// Deprecated: use [container.BlkioStats]. -type BlkioStats = container.BlkioStats +// Deprecated: use [swarm.ServiceCreateOptions]. +type ServiceCreateOptions = swarm.ServiceCreateOptions -// StorageStats is the disk I/O stats for read/write on Windows. +// ServiceUpdateOptions contains the options to be used for updating services. // -// Deprecated: use [container.StorageStats]. -type StorageStats = container.StorageStats +// Deprecated: use [swarm.ServiceUpdateOptions]. +type ServiceUpdateOptions = swarm.ServiceUpdateOptions -// NetworkStats aggregates the network stats of one container +const ( + RegistryAuthFromSpec = swarm.RegistryAuthFromSpec // Deprecated: use [swarm.RegistryAuthFromSpec]. + RegistryAuthFromPreviousSpec = swarm.RegistryAuthFromPreviousSpec // Deprecated: use [swarm.RegistryAuthFromPreviousSpec]. +) + +// ServiceListOptions holds parameters to list services with. // -// Deprecated: use [container.NetworkStats]. -type NetworkStats = container.NetworkStats +// Deprecated: use [swarm.ServiceListOptions]. +type ServiceListOptions = swarm.ServiceListOptions -// PidsStats contains the stats of a container's pids +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. // -// Deprecated: use [container.PidsStats]. -type PidsStats = container.PidsStats +// Deprecated: use [swarm.ServiceInspectOptions]. +type ServiceInspectOptions = swarm.ServiceInspectOptions -// Stats is Ultimate struct aggregating all types of stats of one container +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey // -// Deprecated: use [container.Stats]. -type Stats = container.Stats +// Deprecated: use [swarm.UnlockKeyResponse]. +type SwarmUnlockKeyResponse = swarm.UnlockKeyResponse -// StatsJSON is newly used Networks +// BuildCache contains information about a build cache record. // -// Deprecated: use [container.StatsResponse]. -type StatsJSON = container.StatsResponse +// Deprecated: deprecated in API 1.49. Use [build.CacheRecord] instead. +type BuildCache = build.CacheRecord -// EventsOptions holds parameters to filter events with. +// BuildCachePruneOptions hold parameters to prune the build cache // -// Deprecated: use [events.ListOptions]. -type EventsOptions = events.ListOptions +// Deprecated: use [build.CachePruneOptions]. +type BuildCachePruneOptions = build.CachePruneOptions -// ImageSearchOptions holds parameters to search images with. +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" // -// Deprecated: use [registry.SearchOptions]. -type ImageSearchOptions = registry.SearchOptions +// Deprecated: use [build.CachePruneReport]. 
+type BuildCachePruneReport = build.CachePruneReport -// ImageImportSource holds source information for ImageImport +// BuildResult contains the image id of a successful build/ // -// Deprecated: use [image.ImportSource]. -type ImageImportSource image.ImportSource +// Deprecated: use [build.Result]. +type BuildResult = build.Result -// ImageLoadResponse returns information to the client about a load process. +// ImageBuildOptions holds the information +// necessary to build images. // -// Deprecated: use [image.LoadResponse]. -type ImageLoadResponse = image.LoadResponse +// Deprecated: use [build.ImageBuildOptions]. +type ImageBuildOptions = build.ImageBuildOptions -// ContainerNode stores information about the node that a container -// is running on. It's only used by the Docker Swarm standalone API. +// ImageBuildOutput defines configuration for exporting a build result // -// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release. -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} +// Deprecated: use [build.ImageBuildOutput]. +type ImageBuildOutput = build.ImageBuildOutput + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +// +// Deprecated: use [build.ImageBuildResponse]. +type ImageBuildResponse = build.ImageBuildResponse + +// BuilderVersion sets the version of underlying builder to use +// +// Deprecated: use [build.BuilderVersion]. +type BuilderVersion = build.BuilderVersion + +const ( + // BuilderV1 is the first generation builder in docker daemon + // + // Deprecated: use [build.BuilderV1]. + BuilderV1 = build.BuilderV1 + // BuilderBuildKit is builder based on moby/buildkit project + // + // Deprecated: use [build.BuilderBuildKit]. + BuilderBuildKit = build.BuilderBuildKit +) diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go index 621725a3..1a0325c7 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -1,4 +1,4 @@ -package versions // import "github.com/docker/docker/api/types/versions" +package versions import ( "strconv" diff --git a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go new file mode 100644 index 00000000..3d716c6e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go @@ -0,0 +1,8 @@ +package volume + +// DiskUsage contains disk usage for volumes. 
+type DiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*Volume +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go index 0b9645e0..875524fb 100644 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -1,4 +1,4 @@ -package volume // import "github.com/docker/docker/api/types/volume" +package volume import "github.com/docker/docker/api/types/filters" diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go index f958f80a..c26ed44c 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_update.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go @@ -1,4 +1,4 @@ -package volume // import "github.com/docker/docker/api/types/volume" +package volume // UpdateOptions is configuration to update a Volume with. type UpdateOptions struct { diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go index b76bf366..a5eeb817 100644 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,7 +10,7 @@ func (cli *Client) BuildCancel(ctx context.Context, id string) error { query := url.Values{} query.Set("id", id) - serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go index 1a830f41..6f0f59e3 100644 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,38 +6,49 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/filters" "github.com/pkg/errors" ) // BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { +func (cli *Client) BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) { if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil { return nil, err } - report := types.BuildCachePruneReport{} - query := url.Values{} if opts.All { query.Set("all", "1") } - query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) + + if opts.KeepStorage != 0 { + query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) + } + if opts.ReservedSpace != 0 { + query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace))) + } + if opts.MaxUsedSpace != 0 { + query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace))) + } + if opts.MinFreeSpace != 0 { + query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace))) + } f, err := filters.ToJSON(opts.Filters) if err != nil { return nil, errors.Wrap(err, "prune could not marshal filters option") } query.Set("filters", f) - serverResp, err := cli.post(ctx, "/build/prune", 
query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + report := build.CachePruneReport{} + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { return nil, errors.Wrap(err, "error retrieving disk usage") } diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/checkpoint.go similarity index 72% rename from vendor/github.com/docker/docker/client/interface_experimental.go rename to vendor/github.com/docker/docker/client/checkpoint.go index c585c104..d020574c 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/checkpoint.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,11 +6,11 @@ import ( "github.com/docker/docker/api/types/checkpoint" ) -type apiClientExperimental interface { - CheckpointAPIClient -} - -// CheckpointAPIClient defines API client methods for the checkpoints +// CheckpointAPIClient defines API client methods for the checkpoints. +// +// Experimental: checkpoint and restore is still an experimental feature, +// and only available if the daemon is running with experimental features +// enabled. type CheckpointAPIClient interface { CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 9746d288..961a5fe6 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,8 +7,13 @@ import ( ) // CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) +func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options checkpoint.CreateOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, options, nil) ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index b968c2b2..4c51b25f 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,11 @@ import ( // CheckpointDelete deletes the checkpoint with the given name from the given container func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.CheckpointDir != "" { 
query.Set("dir", options.CheckpointDir) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 8feb1f3f..8164c766 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -23,6 +23,6 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options return checkpoints, err } - err = json.NewDecoder(resp.body).Decode(&checkpoints) + err = json.NewDecoder(resp.Body).Decode(&checkpoints) return checkpoints, err } diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 60d91bc6..d6e014dd 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -2,7 +2,7 @@ Package client is a Go client for the Docker Engine API. For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/api/ +https://docs.docker.com/reference/api/engine/ # Usage @@ -39,7 +39,7 @@ For example, to list running containers (the equivalent of "docker ps"): } } */ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -59,7 +59,6 @@ import ( "github.com/docker/go-connections/sockets" "github.com/pkg/errors" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - "go.opentelemetry.io/otel/trace" ) // DummyHost is a hostname used for local communication. @@ -99,6 +98,9 @@ const DummyHost = "api.moby.localhost" // recent version before negotiation was introduced. const fallbackAPIVersion = "1.24" +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} + // Client is the API client that performs all operations // against a docker server. type Client struct { @@ -138,7 +140,7 @@ type Client struct { // negotiateLock is used to single-flight the version negotiation process negotiateLock sync.Mutex - tp trace.TracerProvider + traceOpts []otelhttp.Option // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). // Store the original transport as the http.Client transport will be wrapped with tracing libs. @@ -200,6 +202,12 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { client: client, proto: hostURL.Scheme, addr: hostURL.Host, + + traceOpts: []otelhttp.Option{ + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + }, } for _, op := range ops { @@ -227,13 +235,7 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } } - c.client.Transport = otelhttp.NewTransport( - c.client.Transport, - otelhttp.WithTracerProvider(c.tp), - otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { - return req.Method + " " + req.URL.Path - }), - ) + c.client.Transport = otelhttp.NewTransport(c.client.Transport, c.traceOpts...) return c, nil } @@ -247,6 +249,14 @@ func (cli *Client) tlsConfig() *tls.Config { func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { transport := &http.Transport{} + // Necessary to prevent long-lived processes using the + // client from leaking connections due to idle connections + // not being released. + // TODO: see if we can also address this from the server side, + // or in go-connections. 
+ // see: https://github.com/moby/moby/issues/45539 + transport.MaxIdleConns = 6 + transport.IdleConnTimeout = 30 * time.Second err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) if err != nil { return nil, err @@ -296,8 +306,7 @@ func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) s var apiPath string _ = cli.checkVersion(ctx) if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) + apiPath = path.Join(cli.basePath, "/v"+strings.TrimPrefix(cli.version, "v"), p) } else { apiPath = path.Join(cli.basePath, p) } @@ -442,6 +451,10 @@ func (cli *Client) dialerFromTransport() func(context.Context, string, string) ( // // ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return cli.dialer() +} + +func (cli *Client) dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { if dialFn := cli.dialerFromTransport(); dialFn != nil { return dialFn(ctx, cli.proto, cli.addr) diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/client_interfaces.go similarity index 77% rename from vendor/github.com/docker/docker/client/interface.go rename to vendor/github.com/docker/docker/client/client_interfaces.go index cc60a5d1..df7aad43 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/client_interfaces.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,6 +7,7 @@ import ( "net/http" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" @@ -20,17 +21,23 @@ import ( ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. -type CommonAPIClient interface { +// +// Deprecated: use [APIClient] instead. This type will be an alias for [APIClient] in the next release, and removed after. +type CommonAPIClient = stableAPIClient + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + stableAPIClient + CheckpointAPIClient // CheckpointAPIClient is still experimental. +} + +type stableAPIClient interface { ConfigAPIClient ContainerAPIClient DistributionAPIClient ImageAPIClient - NodeAPIClient NetworkAPIClient PluginAPIClient - ServiceAPIClient - SwarmAPIClient - SecretAPIClient SystemAPIClient VolumeAPIClient ClientVersion() string @@ -39,27 +46,43 @@ type CommonAPIClient interface { ServerVersion(ctx context.Context) (types.Version, error) NegotiateAPIVersion(ctx context.Context) NegotiateAPIVersionPing(types.Ping) - DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + HijackDialer Dialer() func(context.Context) (net.Conn, error) Close() error + SwarmManagementAPIClient +} + +// SwarmManagementAPIClient defines all methods for managing Swarm-specific +// objects. +type SwarmManagementAPIClient interface { + SwarmAPIClient + NodeAPIClient + ServiceAPIClient + SecretAPIClient + ConfigAPIClient +} + +// HijackDialer defines methods for a hijack dialer. 
+type HijackDialer interface { + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) } // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) + ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (container.CommitResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) + ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (container.ExecCreateResponse, error) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerInspect(ctx context.Context, container string) (container.InspectResponse, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (container.InspectResponse, []byte, error) ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) + ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) ContainerPause(ctx context.Context, container string) error ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error @@ -71,9 +94,9 @@ type ContainerAPIClient interface { ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error) ContainerStart(ctx context.Context, container string, options container.StartOptions) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error - ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerTop(ctx context.Context, container string, arguments []string) (container.TopResponse, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.UpdateResponse, error) ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan 
container.WaitResponse, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error @@ -87,22 +110,34 @@ type DistributionAPIClient interface { // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + ImageBuild(ctx context.Context, context io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error) + + ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (image.InspectResponse, error) + ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) ([]image.HistoryResponseItem, error) + ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (image.LoadResponse, error) + ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (io.ReadCloser, error) + + ImageAPIClientDeprecated +} + +// ImageAPIClientDeprecated defines deprecated methods of the ImageAPIClient. +type ImageAPIClientDeprecated interface { + // ImageInspectWithRaw returns the image information and its raw representation. + // + // Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option. 
+ ImageInspectWithRaw(ctx context.Context, image string) (image.InspectResponse, []byte, error) } // NetworkAPIClient defines API client methods for the networks @@ -120,8 +155,8 @@ type NetworkAPIClient interface { // NodeAPIClient defines API client methods for the nodes type NodeAPIClient interface { NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error } @@ -141,22 +176,22 @@ type PluginAPIClient interface { // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options swarm.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) + TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) } // SwarmAPIClient defines API client methods for the swarm type SwarmAPIClient interface { SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error SwarmLeave(ctx context.Context, force bool) error SwarmInspect(ctx context.Context) (swarm.Swarm, error) @@ -185,8 +220,8 @@ type VolumeAPIClient interface { // SecretAPIClient defines API client methods for secrets type SecretAPIClient interface { - SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) + 
SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) SecretRemove(ctx context.Context, id string) error SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error @@ -194,8 +229,8 @@ type SecretAPIClient interface { // ConfigAPIClient defines API client methods for configs type ConfigAPIClient interface { - ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) ConfigRemove(ctx context.Context, id string) error ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 9fe78ea4..e5b921b4 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,6 +1,6 @@ //go:build !windows -package client // import "github.com/docker/docker/client" +package client // DefaultDockerHost defines OS-specific default host if the DOCKER_HOST // (EnvOverrideHost) environment variable is unset or empty. diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index 56572d1a..19b954b2 100644 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client // DefaultDockerHost defines OS-specific default host if the DOCKER_HOST // (EnvOverrideHost) environment variable is unset or empty. diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index 3deb4a8e..a39168e2 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -1,16 +1,15 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" ) // ConfigCreate creates a new config. 
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse +func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) { + var response swarm.ConfigCreateResponse if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil { return response, err } @@ -20,6 +19,6 @@ func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (t return response, err } - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 2c6c7cb3..a9f0a8b0 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -11,8 +11,9 @@ import ( // ConfigInspectWithRaw returns the config information with raw data func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - if id == "" { - return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + id, err := trimID("config", id) + if err != nil { + return swarm.Config{}, nil, err } if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil { return swarm.Config{}, nil, err @@ -23,7 +24,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C return swarm.Config{}, nil, err } - body, err := io.ReadAll(resp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Config{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go index 14dd3813..6f8a1c21 100644 --- a/vendor/github.com/docker/docker/client/config_list.go +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -1,17 +1,16 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // ConfigList returns the list of configs. -func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { +func (cli *Client) ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) { if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil { return nil, err } @@ -33,6 +32,6 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio } var configs []swarm.Config - err = json.NewDecoder(resp.body).Decode(&configs) + err = json.NewDecoder(resp.Body).Decode(&configs) return configs, err } diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index d05b0113..99d33b1c 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -1,9 +1,13 @@ -package client // import "github.com/docker/docker/client" +package client import "context" // ConfigRemove removes a config. 
func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + id, err := trimID("config", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 6995861d..9bc137f7 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,10 @@ import ( // ConfigUpdate attempts to update a config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + id, err := trimID("config", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index 6a32e5f6..1fb3493e 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -33,7 +33,12 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) { +func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options container.AttachOptions) (types.HijackedResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return types.HijackedResponse{}, err + } + query := url.Values{} if options.Stream { query.Set("stream", "1") @@ -54,7 +59,7 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{ + return cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{ "Content-Type": {"text/plain"}, }) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index 26b3f091..2b5b9852 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,32 +7,36 @@ import ( "net/url" "github.com/distribution/reference" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" ) // ContainerCommit applies changes to a container and creates a new tagged image. 
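A recurring change in this patch is that the inline `if id == ""` checks are replaced by a shared trimID helper, whose body is not part of these hunks. A sketch of the behavior implied by the code it replaces: the ID is normalized and an empty one is rejected with a not-found style error before any request is sent. The whitespace trimming and error wording here are assumptions:

    package main

    import (
    	"errors"
    	"fmt"
    	"strings"
    )

    var errNotFound = errors.New("not found")

    // trimIDSketch mirrors what the vendored trimID helper appears to do, based
    // on the checks it replaces: normalize the user-supplied ID and fail fast on
    // an empty one. The TrimSpace call is an assumption; the helper's body is
    // not shown in this patch.
    func trimIDSketch(objType, id string) (string, error) {
    	id = strings.TrimSpace(id)
    	if id == "" {
    		return "", fmt.Errorf("%w: no such %s: %q", errNotFound, objType, id)
    	}
    	return id, nil
    }

    func main() {
    	if _, err := trimIDSketch("container", "   "); err != nil {
    		fmt.Println(err) // not found: no such container: ""
    	}
    }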
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) { +func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options container.CommitOptions) (container.CommitResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.CommitResponse{}, err + } + var repository, tag string if options.Reference != "" { ref, err := reference.ParseNormalizedNamed(options.Reference) if err != nil { - return types.IDResponse{}, err + return container.CommitResponse{}, err } if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + return container.CommitResponse{}, errors.New("refusing to create a tag with a digest reference") } ref = reference.TagNameOnly(ref) if tagged, ok := ref.(reference.Tagged); ok { tag = tagged.Tag() } - repository = reference.FamiliarName(ref) + repository = ref.Name() } query := url.Values{} - query.Set("container", container) + query.Set("container", containerID) query.Set("repo", repository) query.Set("tag", tag) query.Set("comment", options.Comment) @@ -44,13 +48,13 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option query.Set("pause", "0") } - var response types.IDResponse + var response container.CommitResponse resp, err := cli.post(ctx, "/commit", query, options.Config, nil) defer ensureReaderClosed(resp) if err != nil { return response, err } - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 8490a3b1..7c4130dc 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -16,21 +16,30 @@ import ( // ContainerStatPath returns stat information about a path inside the container filesystem. func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.PathStat{}, err + } + query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - defer ensureReaderClosed(response) + resp, err := cli.head(ctx, "/containers/"+containerID+"/archive", query, nil) + defer ensureReaderClosed(resp) if err != nil { return container.PathStat{}, err } - return getContainerPathStatFromHeader(response.header) + return getContainerPathStatFromHeader(resp.Header) } // CopyToContainer copies content into the container filesystem. // Note that `content` must be a Reader for a TAR archive func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. 
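ContainerCommit now returns container.CommitResponse instead of types.IDResponse, and the repo query uses the fully-qualified ref.Name() rather than reference.FamiliarName. A minimal migration sketch; the container name and image reference are illustrative:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/api/types/container"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// Like the old types.IDResponse, container.CommitResponse carries the
    	// ID of the newly created image.
    	resp, err := cli.ContainerCommit(context.Background(), "my-container", container.CommitOptions{
    		Reference: "example.com/demo/snapshot:latest",
    		Comment:   "state after setup",
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("committed image:", resp.ID)
    }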
@@ -42,9 +51,7 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str query.Set("copyUIDGID", "true") } - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) + response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, content, nil) defer ensureReaderClosed(response) if err != nil { return err @@ -56,11 +63,15 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str // CopyFromContainer gets the content from the container and returns it as a Reader // for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, container.PathStat{}, err + } + query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) + resp, err := cli.get(ctx, "/containers/"+containerID+"/archive", query, nil) if err != nil { return nil, container.PathStat{}, err } @@ -71,11 +82,11 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s // copy it locally. Along with the stat info about the local destination, // we have everything we need to handle the multiple possibilities there // can be when copying a file/dir from one location to another file/dir. - stat, err := getContainerPathStatFromHeader(response.header) + stat, err := getContainerPathStatFromHeader(resp.Header) if err != nil { return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) } - return response.body, stat, err + return resp.Body, stat, err } func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) { diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 5442d426..0625cb12 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -1,10 +1,13 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" + "errors" "net/url" "path" + "sort" + "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" @@ -12,12 +15,6 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - // ContainerCreate creates a new container based on the given configuration. // It can be associated with a name, but it's not mandatory. 
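The copy helpers above consume and produce tar streams, with path metadata carried in response headers (now read from the exported resp.Header). A usage sketch that round-trips a single in-memory file; "my-container" is illustrative:

    package main

    import (
    	"archive/tar"
    	"bytes"
    	"context"
    	"fmt"
    	"io"

    	"github.com/docker/docker/api/types/container"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()
    	ctx := context.Background()

    	// CopyToContainer expects a tar archive; build a one-file archive in memory.
    	var buf bytes.Buffer
    	tw := tar.NewWriter(&buf)
    	data := []byte("hello\n")
    	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o644, Size: int64(len(data))}); err != nil {
    		panic(err)
    	}
    	if _, err := tw.Write(data); err != nil {
    		panic(err)
    	}
    	if err := tw.Close(); err != nil {
    		panic(err)
    	}

    	if err := cli.CopyToContainer(ctx, "my-container", "/tmp", &buf, container.CopyToContainerOptions{}); err != nil {
    		panic(err)
    	}

    	// CopyFromContainer returns the tar stream plus the stat info decoded
    	// from the response header.
    	rc, stat, err := cli.CopyFromContainer(ctx, "my-container", "/tmp/hello.txt")
    	if err != nil {
    		panic(err)
    	}
    	defer rc.Close()
    	fmt.Printf("copied back %q (%d bytes, mode %v) as a tar stream\n", stat.Name, stat.Size, stat.Mode)
    	io.Copy(io.Discard, rc)
    }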
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { @@ -58,6 +55,22 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize hostConfig.ConsoleSize = [2]uint{0, 0} } + if versions.LessThan(cli.ClientVersion(), "1.44") { + for _, m := range hostConfig.Mounts { + if m.BindOptions != nil { + // ReadOnlyNonRecursive can be safely ignored when API < 1.44 + if m.BindOptions.ReadOnlyForceRecursive { + return response, errors.New("bind-recursive=readonly requires API v1.44 or later") + } + if m.BindOptions.NonRecursive && versions.LessThan(cli.ClientVersion(), "1.40") { + return response, errors.New("bind-recursive=disabled requires API v1.40 or later") + } + } + } + } + + hostConfig.CapAdd = normalizeCapabilities(hostConfig.CapAdd) + hostConfig.CapDrop = normalizeCapabilities(hostConfig.CapDrop) } // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified. @@ -74,19 +87,19 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config query.Set("name", containerName) } - body := configWrapper{ + body := container.CreateRequest{ Config: config, HostConfig: hostConfig, NetworkingConfig: networkingConfig, } - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } - err = json.NewDecoder(serverResp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } @@ -114,3 +127,42 @@ func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) b } return false } + +// allCapabilities is a magic value for "all capabilities" +const allCapabilities = "ALL" + +// normalizeCapabilities normalizes capabilities to their canonical form, +// removes duplicates, and sorts the results. +// +// It is similar to [github.com/docker/docker/oci/caps.NormalizeLegacyCapabilities], +// but performs no validation based on supported capabilities. +func normalizeCapabilities(caps []string) []string { + var normalized []string + + unique := make(map[string]struct{}) + for _, c := range caps { + c = normalizeCap(c) + if _, ok := unique[c]; ok { + continue + } + unique[c] = struct{}{} + normalized = append(normalized, c) + } + + sort.Strings(normalized) + return normalized +} + +// normalizeCap normalizes a capability to its canonical format by upper-casing +// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL" +// magic-value. 
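With this change, capabilities in HostConfig.CapAdd/CapDrop are canonicalized client-side before the request is sent: upper-cased, CAP_-prefixed, de-duplicated, and sorted, with "ALL" accepted as a magic value. A usage sketch showing inputs the client now accepts; the image and command are illustrative:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/api/types/container"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	hostConfig := &container.HostConfig{
    		// Mixed spellings and a duplicate: the client normalizes these to
    		// CAP_NET_ADMIN and CAP_SYS_PTRACE before the request goes out.
    		CapAdd:  []string{"net_admin", "CAP_NET_ADMIN", "sys_ptrace"},
    		CapDrop: []string{"all"}, // accepted as the "ALL" magic value
    	}

    	resp, err := cli.ContainerCreate(context.Background(),
    		&container.Config{Image: "alpine:latest", Cmd: []string{"sleep", "60"}},
    		hostConfig, nil, nil, "")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("created:", resp.ID)
    }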
+func normalizeCap(cap string) string { + cap = strings.ToUpper(cap) + if cap == allCapabilities { + return cap + } + if !strings.HasPrefix(cap, "CAP_") { + cap = "CAP_" + cap + } + return cap +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go index c22c819a..3848e311 100644 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,14 +10,21 @@ import ( // ContainerDiff shows differences in a container filesystem since it was started. func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) { - var changes []container.FilesystemChange + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(resp) if err != nil { - return changes, err + return nil, err } - err = json.NewDecoder(serverResp.body).Decode(&changes) + var changes []container.FilesystemChange + err = json.NewDecoder(resp.Body).Decode(&changes) + if err != nil { + return nil, err + } return changes, err } diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 9379448d..8abbf892 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,8 +11,11 @@ import ( ) // ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) { - var response types.IDResponse +func (cli *Client) ContainerExecCreate(ctx context.Context, containerID string, options container.ExecOptions) (container.ExecCreateResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.ExecCreateResponse{}, err + } // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. @@ -20,22 +23,24 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, op // Normally, version-negotiation (if enabled) would not happen until // the API request is made. 
if err := cli.checkVersion(ctx); err != nil { - return response, err + return container.ExecCreateResponse{}, err } if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil { - return response, err + return container.ExecCreateResponse{}, err } if versions.LessThan(cli.ClientVersion(), "1.42") { options.ConsoleSize = nil } - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil) + resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { - return response, err + return container.ExecCreateResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + + var response container.ExecCreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } @@ -70,7 +75,7 @@ func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (con return response, err } - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go index d0c0a5cb..3fc4d570 100644 --- a/vendor/github.com/docker/docker/client/container_export.go +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,10 +10,15 @@ import ( // and returns them as an io.ReadCloser. It's up to the caller // to close the stream. func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + containerID, err := trimID("container", containerID) if err != nil { return nil, err } - return serverResp.body, nil + resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index d48f0d3a..18ccdf23 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -7,46 +7,50 @@ import ( "io" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerInspect returns the container information. 
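ContainerExecCreate now returns container.ExecCreateResponse instead of types.IDResponse. A minimal sketch of the updated call; "my-container" is illustrative:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/api/types/container"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// The exec ID comes back in container.ExecCreateResponse rather than
    	// types.IDResponse.
    	exec, err := cli.ContainerExecCreate(context.Background(), "my-container", container.ExecOptions{
    		Cmd:          []string{"ls", "/"},
    		AttachStdout: true,
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("exec ID:", exec.ID)
    }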
-func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - if containerID == "" { - return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.InspectResponse{}, err } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) + + resp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return types.ContainerJSON{}, err + return container.InspectResponse{}, err } - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) + var response container.InspectResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } // ContainerInspectWithRaw returns the container information and its raw representation. -func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - if containerID == "" { - return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} +func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (container.InspectResponse, []byte, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.InspectResponse{}, nil, err } + query := url.Values{} if getSize { query.Set("size", "1") } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { - return types.ContainerJSON{}, nil, err + return container.InspectResponse{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { - return types.ContainerJSON{}, nil, err + return container.InspectResponse{}, nil, err } - var response types.ContainerJSON + var response container.InspectResponse rdr := bytes.NewReader(body) err = json.NewDecoder(rdr).Decode(&response) return response, body, err diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index 7c9529f1..251ae479 100644 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,6 +7,11 @@ import ( // ContainerKill terminates the container process but does not remove the container from the docker host. 
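ContainerInspect now returns container.InspectResponse in place of types.ContainerJSON, carrying the same fields, and a blank ID fails fast in the client via trimID instead of reaching the API. A migration sketch; "my-container" is illustrative:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// Only the return type name changes; field access is as before.
    	info, err := cli.ContainerInspect(context.Background(), "my-container")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%s running=%v\n", info.Name, info.State.Running)
    }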
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if signal != "" { query.Set("signal", signal) diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index 782e1b3c..e17b14ac 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,13 +6,12 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" ) // ContainerList returns the list of containers in the docker host. -func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) { +func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) { query := url.Values{} if options.All { @@ -51,7 +50,7 @@ func (cli *Client) ContainerList(ctx context.Context, options container.ListOpti return nil, err } - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) + var containers []container.Summary + err = json.NewDecoder(resp.Body).Decode(&containers) return containers, err } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 61197d84..3ea1f68d 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -33,7 +33,12 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. -func (cli *Client) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) { +func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options container.LogsOptions) (io.ReadCloser, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } + query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") @@ -72,9 +77,9 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options } query.Set("tail", options.Tail) - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil) if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go index 5e7271a3..59b3e2d8 100644 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -1,9 +1,14 @@ -package client // import "github.com/docker/docker/client" +package client import "context" // ContainerPause pauses the main process of a given container without terminating it. 
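ContainerList's element type changes from types.Container to container.Summary. A minimal sketch of the updated loop:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/api/types/container"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// The slice element type is now container.Summary.
    	containers, err := cli.ContainerList(context.Background(), container.ListOptions{All: true})
    	if err != nil {
    		panic(err)
    	}
    	for _, c := range containers {
    		fmt.Println(c.ID[:12], c.Image, c.Status)
    	}
    }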
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index 29c922da..84fb6bc2 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,25 +11,24 @@ import ( // ContainersPrune requests the daemon to delete unused data func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) { - var report container.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { - return report, err + return container.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return container.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return container.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) + var report container.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return container.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index 39f7b106..b1a2ce6b 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,11 @@ import ( // ContainerRemove kills and removes a container from the docker host. func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.RemoveVolumes { query.Set("v", "1") diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go index 240fdf55..4c030228 100644 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,6 +7,11 @@ import ( // ContainerRename changes the name of a given container. 
func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} query.Set("name", newContainerName) resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index 5cfd01d4..56b7368b 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,18 +10,27 @@ import ( // ContainerResize changes the size of the tty for a container. func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) } // ContainerExecResize changes the size of the tty for an exec process running inside a container. func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error { + execID, err := trimID("exec", execID) + if err != nil { + return err + } return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) } func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) + query.Set("h", strconv.FormatUint(uint64(height), 10)) + query.Set("w", strconv.FormatUint(uint64(width), 10)) resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 02b5079b..5af07bfc 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -13,6 +13,11 @@ import ( // It makes the daemon wait for the container to be up again for // a specific amount of time, given the timeout. func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index 33ba85f2..c7206e32 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,11 +9,16 @@ import ( // ContainerStart sends a request to the docker daemon to start a container. 
func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} - if len(options.CheckpointID) != 0 { + if options.CheckpointID != "" { query.Set("checkpoint", options.CheckpointID) } - if len(options.CheckpointDir) != 0 { + if options.CheckpointDir != "" { query.Set("checkpoint-dir", options.CheckpointDir) } diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index b5641dae..2244e0f4 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,6 +10,11 @@ import ( // ContainerStats returns near realtime stats for a given container. // It's up to the caller to close the io.ReadCloser returned. func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.StatsResponseReader{}, err + } + query := url.Values{} query.Set("stream", "0") if stream { @@ -22,14 +27,19 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea } return container.StatsResponseReader{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), + Body: resp.Body, + OSType: getDockerOS(resp.Header.Get("Server")), }, nil } // ContainerStatsOneShot gets a single stat entry from a container. // It differs from `ContainerStats` in that the API should not wait to prime the stats func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.StatsResponseReader{}, err + } + query := url.Values{} query.Set("stream", "0") query.Set("one-shot", "1") @@ -40,7 +50,7 @@ func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string } return container.StatsResponseReader{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), + Body: resp.Body, + OSType: getDockerOS(resp.Header.Get("Server")), }, nil } diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 7c98a354..175b9c8b 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -17,6 +17,11 @@ import ( // otherwise the engine default. A negative timeout value can be specified, // meaning no timeout, i.e. no forceful termination is performed. 
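The stats endpoints now populate StatsResponseReader from the exported resp.Body and resp.Header. A usage sketch of the one-shot variant, decoding into container.StatsResponse (the stats payload type in current api/types/container; its use here is an assumption, as it is not part of these hunks). "my-container" is illustrative:

    package main

    import (
    	"context"
    	"encoding/json"
    	"fmt"

    	"github.com/docker/docker/api/types/container"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// One-shot mode returns a single sample without waiting for the daemon
    	// to prime CPU percentages.
    	stats, err := cli.ContainerStatsOneShot(context.Background(), "my-container")
    	if err != nil {
    		panic(err)
    	}
    	defer stats.Body.Close()

    	var sample container.StatsResponse
    	if err := json.NewDecoder(stats.Body).Decode(&sample); err != nil {
    		panic(err)
    	}
    	fmt.Println(stats.OSType, "memory usage:", sample.MemoryStats.Usage)
    }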
func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + query := url.Values{} if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go index a5b78999..5770f9d4 100644 --- a/vendor/github.com/docker/docker/client/container_top.go +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,8 +10,12 @@ import ( ) // ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.TopResponse, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return container.TopResponse{}, err + } + query := url.Values{} if len(arguments) > 0 { query.Set("ps_args", strings.Join(arguments, " ")) @@ -20,9 +24,10 @@ func (cli *Client) ContainerTop(ctx context.Context, containerID string, argumen resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) defer ensureReaderClosed(resp) if err != nil { - return response, err + return container.TopResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + var response container.TopResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go index 1d8f8731..c95f6e3a 100644 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -1,9 +1,14 @@ -package client // import "github.com/docker/docker/client" +package client import "context" // ContainerUnpause resumes the process execution within a container func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + containerID, err := trimID("container", containerID) + if err != nil { + return err + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index bf68a530..10e966d0 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -8,14 +8,19 @@ import ( ) // ContainerUpdate updates the resources of a container. 
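ContainerTop now returns container.TopResponse (formerly container.ContainerTopOKBody): column titles plus one row of fields per process. A minimal sketch; "my-container" is illustrative:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// Passing nil arguments uses the daemon's default ps options.
    	top, err := cli.ContainerTop(context.Background(), "my-container", nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(top.Titles)
    	for _, proc := range top.Processes {
    		fmt.Println(proc)
    	}
    }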
-func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - defer ensureReaderClosed(serverResp) +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.UpdateResponse, error) { + containerID, err := trimID("container", containerID) if err != nil { - return response, err + return container.UpdateResponse{}, err } - err = json.NewDecoder(serverResp.body).Decode(&response) + resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(resp) + if err != nil { + return container.UpdateResponse{}, err + } + + var response container.UpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 8bb6be0a..75c03a12 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -33,6 +33,12 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit resultC := make(chan container.WaitResponse) errC := make(chan error, 1) + containerID, err := trimID("container", containerID) + if err != nil { + errC <- err + return resultC, errC + } + // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. 
// @@ -61,9 +67,8 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit go func() { defer ensureReaderClosed(resp) - body := resp.body responseText := bytes.NewBuffer(nil) - stream := io.TeeReader(body, responseText) + stream := io.TeeReader(resp.Body, responseText) var res container.WaitResponse if err := json.NewDecoder(stream).Decode(&res); err != nil { @@ -105,7 +110,7 @@ func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) defer ensureReaderClosed(resp) var res container.WaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { errC <- err return } diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index ba0d92e9..729e1057 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -19,14 +19,14 @@ func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions } } - serverResp, err := cli.get(ctx, "/system/df", query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(resp) if err != nil { return types.DiskUsage{}, err } var du types.DiskUsage - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) } return du, nil diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index 68e6ec5e..693c4121 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,14 +11,12 @@ import ( // DistributionInspect returns the image digest with the full manifest. 
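Per the ContainerWait hunk above, an invalid (blank) container ID is now delivered on the error channel before any API request is made, so callers that already select over both channels need no changes. A usage sketch; "my-container" is illustrative:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/api/types/container"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	// A blank ID is reported on errC before any API call is made; the
    	// select below handles both the result and the error path.
    	resultC, errC := cli.ContainerWait(context.Background(), "my-container", container.WaitConditionNotRunning)
    	select {
    	case res := <-resultC:
    		fmt.Println("exit code:", res.StatusCode)
    	case err := <-errC:
    		fmt.Println("wait failed:", err)
    	}
    }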
func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registry.DistributionInspect if imageRef == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: imageRef} + return registry.DistributionInspect{}, objectNotFoundError{object: "distribution", id: imageRef} } if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { - return distributionInspect, err + return registry.DistributionInspect{}, err } var headers http.Header @@ -28,12 +26,14 @@ func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedReg } } + // Contact the registry to retrieve digest and platform information resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) defer ensureReaderClosed(resp) if err != nil { - return distributionInspect, err + return registry.DistributionInspect{}, err } - err = json.NewDecoder(resp.body).Decode(&distributionInspect) + var distributionInspect registry.DistributionInspect + err = json.NewDecoder(resp.Body).Decode(&distributionInspect) return distributionInspect, err } diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go index 61dd45c1..abe122d1 100644 --- a/vendor/github.com/docker/docker/client/envvars.go +++ b/vendor/github.com/docker/docker/client/envvars.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client const ( // EnvOverrideHost is the name of the environment variable that can be used diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 0d01e243..9e3a2538 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -1,12 +1,14 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" + "errors" "fmt" + "net/http" + cerrdefs "github.com/containerd/errdefs" + "github.com/containerd/errdefs/pkg/errhttp" "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" ) // errConnectionFailed implements an error returned when connection failed. @@ -29,10 +31,18 @@ func IsErrConnectionFailed(err error) bool { } // ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +// +// Deprecated: this function was only used internally, and will be removed in the next release. func ErrorConnectionFailed(host string) error { + return connectionFailed(host) +} + +// connectionFailed returns an error with host in the error message when connection +// to docker daemon failed. +func connectionFailed(host string) error { var err error if host == "" { - err = fmt.Errorf("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") + err = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") } else { err = fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host) } @@ -40,9 +50,11 @@ func ErrorConnectionFailed(host string) error { } // IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. It is an alias for [errdefs.IsNotFound]. +// by the API when some object is not found. 
It is an alias for [cerrdefs.IsNotFound]. +// +// Deprecated: use [cerrdefs.IsNotFound] instead. func IsErrNotFound(err error) bool { - return errdefs.IsNotFound(err) + return cerrdefs.IsNotFound(err) } type objectNotFoundError struct { @@ -75,3 +87,43 @@ func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature str } return nil } + +type httpError struct { + err error + errdef error +} + +func (e *httpError) Error() string { + return e.err.Error() +} + +func (e *httpError) Unwrap() error { + return e.err +} + +func (e *httpError) Is(target error) bool { + return errors.Is(e.errdef, target) +} + +// httpErrorFromStatusCode creates an errdef error, based on the provided HTTP status-code +func httpErrorFromStatusCode(err error, statusCode int) error { + if err == nil { + return nil + } + base := errhttp.ToNative(statusCode) + if base != nil { + return &httpError{err: err, errdef: base} + } + + switch { + case statusCode >= http.StatusOK && statusCode < http.StatusBadRequest: + // it's a client error + return err + case statusCode >= http.StatusBadRequest && statusCode < http.StatusInternalServerError: + return &httpError{err: err, errdef: cerrdefs.ErrInvalidArgument} + case statusCode >= http.StatusInternalServerError && statusCode < 600: + return &httpError{err: err, errdef: cerrdefs.ErrInternal} + default: + return &httpError{err: err, errdef: cerrdefs.ErrUnknown} + } +} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index d3ab26be..498fe463 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -36,9 +36,9 @@ func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-ch errs <- err return } - defer resp.body.Close() + defer resp.Body.Close() - decoder := json.NewDecoder(resp.body) + decoder := json.NewDecoder(resp.Body) close(started) for { diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 839d4c5c..01d121a6 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bufio" @@ -25,32 +25,36 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu if err != nil { return types.HijackedResponse{}, err } - conn, mediaType, err := cli.setupHijackConn(req, "tcp") + conn, mediaType, err := setupHijackConn(cli.dialer(), req, "tcp") if err != nil { return types.HijackedResponse{}, err } - return types.NewHijackedResponse(conn, mediaType), err + if versions.LessThan(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = "" + } + + return types.NewHijackedResponse(conn, mediaType), nil } // DialHijack returns a hijacked connection with negotiated protocol proto. 
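With IsErrNotFound deprecated and httpErrorFromStatusCode mapping API status codes onto the containerd errdefs sentinels, callers can classify errors with cerrdefs helpers (or errors.Is) directly. A migration sketch:

    package main

    import (
    	"context"
    	"fmt"

    	cerrdefs "github.com/containerd/errdefs"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    	if err != nil {
    		panic(err)
    	}
    	defer cli.Close()

    	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
    	switch {
    	case cerrdefs.IsNotFound(err):
    		// Preferred over the deprecated client.IsErrNotFound: API errors
    		// are wrapped so they match the containerd errdefs sentinels.
    		fmt.Println("container does not exist")
    	case err != nil:
    		fmt.Println("inspect failed:", err)
    	}
    }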
func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, http.NoBody) if err != nil { return nil, err } req = cli.addHeaders(req, meta) - conn, _, err := cli.setupHijackConn(req, proto) + conn, _, err := setupHijackConn(cli.Dialer(), req, proto) return conn, err } -func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { +func setupHijackConn(dialer func(context.Context) (net.Conn, error), req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { ctx := req.Context() req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) - dialer := cli.Dialer() conn, err := dialer(ctx) if err != nil { return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") @@ -96,13 +100,7 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, hc.r.Reset(nil) } - var mediaType string - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { - // Prior to 1.42, Content-Type is always set to raw-stream and not relevant - mediaType = resp.Header.Get("Content-Type") - } - - return conn, mediaType, nil + return conn, resp.Header.Get("Content-Type"), nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index d294ddc8..66ca75e4 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,44 +10,50 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" ) // ImageBuild sends a request to the daemon to build images. // The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. 
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) { query, err := cli.imageBuildOptionsToQuery(ctx, options) if err != nil { - return types.ImageBuildResponse{}, err + return build.ImageBuildResponse{}, err } buf, err := json.Marshal(options.AuthConfigs) if err != nil { - return types.ImageBuildResponse{}, err + return build.ImageBuildResponse{}, err } headers := http.Header{} headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/x-tar") - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { - return types.ImageBuildResponse{}, err + return build.ImageBuildResponse{}, err } - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: getDockerOS(serverResp.header.Get("Server")), + return build.ImageBuildResponse{ + Body: resp.Body, + OSType: getDockerOS(resp.Header.Get("Server")), }, nil } -func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, +func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options build.ImageBuildOptions) (url.Values, error) { + query := url.Values{} + if len(options.Tags) > 0 { + query["t"] = options.Tags + } + if len(options.SecurityOpt) > 0 { + query["securityopt"] = options.SecurityOpt + } + if len(options.ExtraHosts) > 0 { + query["extrahosts"] = options.ExtraHosts } if options.SuppressOutput { query.Set("q", "1") @@ -58,9 +64,11 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.I if options.NoCache { query.Set("nocache", "1") } - if options.Remove { - query.Set("rm", "1") - } else { + if !options.Remove { + // only send value when opting out because the daemon's default is + // to remove intermediate containers after a successful build, + // + // TODO(thaJeztah): deprecate "Remove" option, and provide a "NoRemove" or "Keep" option instead. 
query.Set("rm", "0") } @@ -83,42 +91,70 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.I query.Set("isolation", string(options.Isolation)) } - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err + if options.CPUSetCPUs != "" { + query.Set("cpusetcpus", options.CPUSetCPUs) } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err + if options.NetworkMode != "" && options.NetworkMode != network.NetworkDefault { + query.Set("networkmode", options.NetworkMode) } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err + if options.CPUSetMems != "" { + query.Set("cpusetmems", options.CPUSetMems) } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err + if options.CPUShares != 0 { + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + } + if options.CPUQuota != 0 { + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + } + if options.CPUPeriod != 0 { + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + } + if options.Memory != 0 { + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + } + if options.MemorySwap != 0 { + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + } + if options.CgroupParent != "" { + query.Set("cgroupparent", options.CgroupParent) + } + if options.ShmSize != 0 { + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + } + if options.Dockerfile != "" { + query.Set("dockerfile", options.Dockerfile) + } + if options.Target != "" { + query.Set("target", options.Target) + } + if len(options.Ulimits) != 0 { + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + } + if len(options.BuildArgs) != 0 { + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + } + if len(options.Labels) != 0 { + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + } + if len(options.CacheFrom) != 0 { + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) } - query.Set("cachefrom", string(cacheFromJSON)) if options.SessionID != "" { query.Set("session", options.SessionID) } @@ -131,7 +167,9 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.I if options.BuildID != "" { query.Set("buildid", options.BuildID) } - query.Set("version", string(options.Version)) + if options.Version != "" { + 
query.Set("version", string(options.Version)) + } if options.Outputs != nil { outputsJSON, err := json.Marshal(options.Outputs) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 7c7873dc..1e044d77 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti } query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("fromImage", ref.Name()) query.Set("tag", getAPITagFromNamedRef(ref)) if options.Platform != "" { query.Set("platform", strings.ToLower(options.Platform)) @@ -30,10 +30,10 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { return cli.post(ctx, "/images/create", query, nil, http.Header{ registry.AuthHeader: {registryAuth}, }) diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go index b5bea10d..fce8b80e 100644 --- a/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -1,22 +1,56 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" + "fmt" "net/url" "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// ImageHistoryWithPlatform sets the platform for the image history operation. +func ImageHistoryWithPlatform(platform ocispec.Platform) ImageHistoryOption { + return imageHistoryOptionFunc(func(opt *imageHistoryOpts) error { + if opt.apiOptions.Platform != nil { + return fmt.Errorf("platform already set to %s", *opt.apiOptions.Platform) + } + opt.apiOptions.Platform = &platform + return nil + }) +} + // ImageHistory returns the changes in an image in history format. 
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - defer ensureReaderClosed(serverResp) +func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts ...ImageHistoryOption) ([]image.HistoryResponseItem, error) { + query := url.Values{} + + var opts imageHistoryOpts + for _, o := range historyOpts { + if err := o.Apply(&opts); err != nil { + return nil, err + } + } + + if opts.apiOptions.Platform != nil { + if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { + return nil, err + } + + p, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return nil, err + } + query.Set("platform", p) + } + + resp, err := cli.get(ctx, "/images/"+imageID+"/history", query, nil) + defer ensureReaderClosed(resp) if err != nil { - return history, err + return nil, err } - err = json.NewDecoder(serverResp.body).Decode(&history) + var history []image.HistoryResponseItem + err = json.NewDecoder(resp.Body).Decode(&history) return history, err } diff --git a/vendor/github.com/docker/docker/client/image_history_opts.go b/vendor/github.com/docker/docker/client/image_history_opts.go new file mode 100644 index 00000000..6d3494dd --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history_opts.go @@ -0,0 +1,19 @@ +package client + +import ( + "github.com/docker/docker/api/types/image" +) + +// ImageHistoryOption is a type representing functional options for the image history operation. +type ImageHistoryOption interface { + Apply(*imageHistoryOpts) error +} +type imageHistoryOptionFunc func(opt *imageHistoryOpts) error + +func (f imageHistoryOptionFunc) Apply(o *imageHistoryOpts) error { + return f(o) +} + +type imageHistoryOpts struct { + apiOptions image.HistoryOptions +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index 43d55eda..5236dbc6 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -21,10 +21,18 @@ func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, r } query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) + if source.SourceName != "" { + query.Set("fromSrc", source.SourceName) + } + if ref != "" { + query.Set("repo", ref) + } + if options.Tag != "" { + query.Set("tag", options.Tag) + } + if options.Message != "" { + query.Set("message", options.Message) + } if options.Platform != "" { query.Set("platform", strings.ToLower(options.Platform)) } @@ -36,5 +44,5 @@ func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, r if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 1de10e5a..4c350031 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -1,32 +1,76 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" "context" "encoding/json" + "fmt" "io" + "net/url" - 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { +// ImageInspect returns the image information. +func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts ...ImageInspectOption) (image.InspectResponse, error) { if imageID == "" { - return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + return image.InspectResponse{}, objectNotFoundError{object: "image", id: imageID} } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ImageInspect{}, nil, err + + var opts imageInspectOpts + for _, opt := range inspectOpts { + if err := opt.Apply(&opts); err != nil { + return image.InspectResponse{}, fmt.Errorf("error applying image inspect option: %w", err) + } + } + + query := url.Values{} + if opts.apiOptions.Manifests { + if err := cli.NewVersionError(ctx, "1.48", "manifests"); err != nil { + return image.InspectResponse{}, err + } + query.Set("manifests", "1") + } + + if opts.apiOptions.Platform != nil { + if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil { + return image.InspectResponse{}, err + } + platform, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return image.InspectResponse{}, err + } + query.Set("platform", platform) } - body, err := io.ReadAll(serverResp.body) + resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { - return types.ImageInspect{}, nil, err + return image.InspectResponse{}, err + } + + buf := opts.raw + if buf == nil { + buf = &bytes.Buffer{} + } + + if _, err := io.Copy(buf, resp.Body); err != nil { + return image.InspectResponse{}, err } - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err + var response image.InspectResponse + err = json.Unmarshal(buf.Bytes(), &response) + return response, err +} + +// ImageInspectWithRaw returns the image information and its raw representation. +// +// Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error) { + var buf bytes.Buffer + resp, err := cli.ImageInspect(ctx, imageID, ImageInspectWithRawResponse(&buf)) + if err != nil { + return image.InspectResponse{}, nil, err + } + return resp, buf.Bytes(), err } diff --git a/vendor/github.com/docker/docker/client/image_inspect_opts.go b/vendor/github.com/docker/docker/client/image_inspect_opts.go new file mode 100644 index 00000000..655cbf0b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect_opts.go @@ -0,0 +1,62 @@ +package client + +import ( + "bytes" + + "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageInspectOption is a type representing functional options for the image inspect operation. 
+type ImageInspectOption interface { + Apply(*imageInspectOpts) error +} +type imageInspectOptionFunc func(opt *imageInspectOpts) error + +func (f imageInspectOptionFunc) Apply(o *imageInspectOpts) error { + return f(o) +} + +// ImageInspectWithRawResponse instructs the client to additionally store the +// raw inspect response in the provided buffer. +func ImageInspectWithRawResponse(raw *bytes.Buffer) ImageInspectOption { + return imageInspectOptionFunc(func(opts *imageInspectOpts) error { + opts.raw = raw + return nil + }) +} + +// ImageInspectWithManifests sets manifests API option for the image inspect operation. +// This option is only available for API version 1.48 and up. +// With this option set, the image inspect operation response will have the +// [image.InspectResponse.Manifests] field populated if the server is multi-platform capable. +func ImageInspectWithManifests(manifests bool) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions.Manifests = manifests + return nil + }) +} + +// ImageInspectWithPlatform sets platform API option for the image inspect operation. +// This option is only available for API version 1.49 and up. +// With this option set, the image inspect operation will return information for the +// specified platform variant of the multi-platform image. +func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions.Platform = platform + return nil + }) +} + +// ImageInspectWithAPIOpts sets the API options for the image inspect operation. +func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions = opts + return nil + }) +} + +type imageInspectOpts struct { + raw *bytes.Buffer + apiOptions image.InspectOptions +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index bef67943..ec0a2ad5 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -56,12 +56,12 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([] query.Set("manifests", "1") } - serverResp, err := cli.get(ctx, "/images/json", query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { return images, err } - err = json.NewDecoder(serverResp.body).Decode(&images) + err = json.NewDecoder(resp.Body).Decode(&images) return images, err } diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index c68f0013..079002e9 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -12,20 +12,43 @@ import ( // ImageLoad loads an image in the docker host from the client host. // It's up to the caller to close the io.ReadCloser in the // ImageLoadResponse returned by this function. 
-func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") +// +// Platform is an optional parameter that specifies the platform to load from +// the provided multi-platform image. This only has an effect if the input image +// is a multi-platform image. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (image.LoadResponse, error) { + var opts imageLoadOpts + for _, opt := range loadOpts { + if err := opt.Apply(&opts); err != nil { + return image.LoadResponse{}, err + } } - resp, err := cli.postRaw(ctx, "/images/load", v, input, http.Header{ + + query := url.Values{} + query.Set("quiet", "0") + if opts.apiOptions.Quiet { + query.Set("quiet", "1") + } + if len(opts.apiOptions.Platforms) > 0 { + if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { + return image.LoadResponse{}, err + } + + p, err := encodePlatforms(opts.apiOptions.Platforms...) + if err != nil { + return image.LoadResponse{}, err + } + query["platform"] = p + } + + resp, err := cli.postRaw(ctx, "/images/load", query, input, http.Header{ "Content-Type": {"application/x-tar"}, }) if err != nil { return image.LoadResponse{}, err } return image.LoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", + Body: resp.Body, + JSON: resp.Header.Get("Content-Type") == "application/json", }, nil } diff --git a/vendor/github.com/docker/docker/client/image_load_opts.go b/vendor/github.com/docker/docker/client/image_load_opts.go new file mode 100644 index 00000000..ebcedd41 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load_opts.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageLoadOption is a type representing functional options for the image load operation. +type ImageLoadOption interface { + Apply(*imageLoadOpts) error +} +type imageLoadOptionFunc func(opt *imageLoadOpts) error + +func (f imageLoadOptionFunc) Apply(o *imageLoadOpts) error { + return f(o) +} + +type imageLoadOpts struct { + apiOptions image.LoadOptions +} + +// ImageLoadWithQuiet sets the quiet option for the image load operation. +func ImageLoadWithQuiet(quiet bool) ImageLoadOption { + return imageLoadOptionFunc(func(opt *imageLoadOpts) error { + opt.apiOptions.Quiet = quiet + return nil + }) +} + +// ImageLoadWithPlatforms sets the platforms to be loaded from the image.
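// Editor's illustration (assumed usage, not part of the upstream patch): loading a
// single platform from a multi-platform tar stream, assuming an initialized
// *client.Client named cli; "images.tar" is a placeholder path.
//
//	tarStream, err := os.Open("images.tar")
//	if err != nil {
//		return err
//	}
//	defer tarStream.Close()
//	resp, err := cli.ImageLoad(ctx, tarStream,
//		client.ImageLoadWithQuiet(true),
//		client.ImageLoadWithPlatforms(ocispec.Platform{OS: "linux", Architecture: "amd64"})) // platforms require API v1.48+
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()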
+func ImageLoadWithPlatforms(platforms ...ocispec.Platform) ImageLoadOption { + return imageLoadOptionFunc(func(opt *imageLoadOpts) error { + if opt.apiOptions.Platforms != nil { + return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms) + } + opt.apiOptions.Platforms = platforms + return nil + }) +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 5ee987e2..52e8bcf5 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,25 +11,24 @@ import ( // ImagesPrune requests the daemon to delete unused data func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (image.PruneReport, error) { - var report image.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { - return report, err + return image.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return image.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return image.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) + var report image.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return image.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index 1634c4c8..ab7606b4 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,9 +6,9 @@ import ( "net/url" "strings" + cerrdefs "github.com/containerd/errdefs" "github.com/distribution/reference" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/errdefs" ) // ImagePull requests the docker host to pull an image from a remote registry. @@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P } query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("fromImage", ref.Name()) if !options.All { query.Set("tag", getAPITagFromNamedRef(ref)) } @@ -35,7 +35,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr @@ -45,7 +45,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } // getAPITagFromNamedRef returns a tag from the specified reference. 
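// Editor's note on the query changes above (illustration, not part of the upstream
// patch): for a reference parsed from the short name "alpine",
// reference.FamiliarName(ref) yields "alpine", while ref.Name() yields the fully
// qualified "docker.io/library/alpine", so "fromImage" now carries the canonical
// repository name.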
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 16f9c465..cbbe9a25 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,10 +9,10 @@ import ( "net/http" "net/url" + cerrdefs "github.com/containerd/errdefs" "github.com/distribution/reference" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" ) // ImagePush requests the docker host to push an image to a remote registry. @@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu return nil, errors.New("cannot push a digest reference") } - name := reference.FamiliarName(ref) query := url.Values{} if !options.All { ref = reference.TagNameOnly(ref) @@ -52,21 +51,21 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu query.Set("platform", string(pJson)) } - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader) } if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*http.Response, error) { return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ registry.AuthHeader: {registryAuth}, }) diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 652d1bfa..8f357c72 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -19,13 +19,21 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options imag query.Set("noprune", "1") } - var dels []image.DeleteResponse + if len(options.Platforms) > 0 { + p, err := encodePlatforms(options.Platforms...) 
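// Editor's note (assumption, not part of the upstream patch): encodePlatforms is a
// package-internal helper not shown in this diff; it presumably serializes each
// ocispec.Platform into the string form expected by the "platforms" query parameter.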
+ if err != nil { + return nil, err + } + query["platforms"] = p + } + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return dels, err + return nil, err } - err = json.NewDecoder(resp.body).Decode(&dels) + var dels []image.DeleteResponse + err = json.NewDecoder(resp.Body).Decode(&dels) return dels, err } diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go index d1314e4b..d2102bec 100644 --- a/vendor/github.com/docker/docker/client/image_save.go +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,15 +7,35 @@ import ( ) // ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. -func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { +// +// Platforms is an optional parameter that specifies the platforms to save from the image. +// This only has an effect if the input image is a multi-platform image. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ...ImageSaveOption) (io.ReadCloser, error) { + var opts imageSaveOpts + for _, opt := range saveOpts { + if err := opt.Apply(&opts); err != nil { + return nil, err + } + } + query := url.Values{ "names": imageIDs, } + if len(opts.apiOptions.Platforms) > 0 { + if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { + return nil, err + } + p, err := encodePlatforms(opts.apiOptions.Platforms...) + if err != nil { + return nil, err + } + query["platform"] = p + } + resp, err := cli.get(ctx, "/images/get", query, nil) if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/image_save_opts.go b/vendor/github.com/docker/docker/client/image_save_opts.go new file mode 100644 index 00000000..acd8f282 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save_opts.go @@ -0,0 +1,33 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ImageSaveOption interface { + Apply(*imageSaveOpts) error +} + +type imageSaveOptionFunc func(opt *imageSaveOpts) error + +func (f imageSaveOptionFunc) Apply(o *imageSaveOpts) error { + return f(o) +} + +// ImageSaveWithPlatforms sets the platforms to be saved from the image.
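// Editor's illustration (assumed usage, not part of the upstream patch): exporting
// one platform of a multi-platform image, assuming an initialized *client.Client
// named cli; the reference is a placeholder.
//
//	rc, err := cli.ImageSave(ctx, []string{"alpine:3.20"},
//		client.ImageSaveWithPlatforms(ocispec.Platform{OS: "linux", Architecture: "arm64"})) // platforms require API v1.48+
//	if err != nil {
//		return err
//	}
//	defer rc.Close()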
+func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption { + return imageSaveOptionFunc(func(opt *imageSaveOpts) error { + if opt.apiOptions.Platforms != nil { + return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms) + } + opt.apiOptions.Platforms = platforms + return nil + }) +} + +type imageSaveOpts struct { + apiOptions image.SaveOptions +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 0a074575..8f5343b9 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,9 +7,9 @@ import ( "net/url" "strconv" + cerrdefs "github.com/containerd/errdefs" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" ) // ImageSearch makes the docker host search by a term in a remote registry. @@ -32,7 +32,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options registr resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) defer ensureReaderClosed(resp) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return results, privilegeErr @@ -43,11 +43,11 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options registr return results, err } - err = json.NewDecoder(resp.body).Decode(&results) + err = json.NewDecoder(resp.Body).Decode(&results) return results, err } -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { return cli.get(ctx, "/images/search", query, http.Header{ registry.AuthHeader: {registryAuth}, }) diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index ea6b4a1e..2bfafc51 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error { ref = reference.TagNameOnly(ref) query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) + query.Set("repo", ref.Name()) if tagged, ok := ref.(reference.Tagged); ok { query.Set("tag", tagged.Tag()) } diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index cc3fcc46..ed85d7f8 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -12,13 +12,13 @@ import ( // Info returns information about the docker server. 
func (cli *Client) Info(ctx context.Context) (system.Info, error) { var info system.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(resp) if err != nil { return info, err } - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { return info, fmt.Errorf("Error reading remote info: %v", err) } diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go deleted file mode 100644 index 5502cd74..00000000 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - CommonAPIClient - apiClientExperimental -} - -// Ensure that Client always implements APIClient. -var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go index 19e985e0..2d7f1790 100644 --- a/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/docker/docker/client/login.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -19,6 +19,6 @@ func (cli *Client) RegistryLogin(ctx context.Context, auth registry.AuthConfig) } var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go index 8daf8906..f7526c5d 100644 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -8,6 +8,16 @@ import ( // NetworkConnect connects a container to an existent network in the docker host. func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + networkID, err := trimID("network", networkID) + if err != nil { + return err + } + + containerID, err = trimID("container", containerID) + if err != nil { + return err + } + nc := network.ConnectOptions{ Container: containerID, EndpointConfig: config, diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 850e31cc..6a7f2ea5 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,15 +10,13 @@ import ( // NetworkCreate creates a new network in the docker host. func (cli *Client) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) { - var response network.CreateResponse - // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. 
if err := cli.checkVersion(ctx); err != nil { - return response, err + return network.CreateResponse{}, err } networkCreateRequest := network.CreateRequest{ @@ -30,12 +28,13 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options netwo networkCreateRequest.CheckDuplicate = &enabled //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. } - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(resp) if err != nil { - return response, err + return network.CreateResponse{}, err } - err = json.NewDecoder(serverResp.body).Decode(&response) + var response network.CreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go index aaf428d8..55f9b6a2 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -8,6 +8,16 @@ import ( // NetworkDisconnect disconnects a container from an existent network in the docker host. func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + networkID, err := trimID("network", networkID) + if err != nil { + return err + } + + containerID, err = trimID("container", containerID) + if err != nil { + return err + } + nd := network.DisconnectOptions{ Container: containerID, Force: force, diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index afc47de6..734ec102 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -18,8 +18,9 @@ func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
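// Editor's note (assumption, not part of the upstream patch): trimID is a
// package-internal helper not shown in this diff; it appears to normalize the given
// ID and reject empty values before a request is built. A hypothetical verbose
// inspect, assuming an initialized *client.Client named cli:
//
//	nw, raw, err := cli.NetworkInspectWithRaw(ctx, networkID,
//		network.InspectOptions{Verbose: true})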
func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, []byte, error) { - if networkID == "" { - return network.Inspect{}, nil, objectNotFoundError{object: "network", id: networkID} + networkID, err := trimID("network", networkID) + if err != nil { + return network.Inspect{}, nil, err } query := url.Values{} if options.Verbose { @@ -35,7 +36,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, return network.Inspect{}, nil, err } - raw, err := io.ReadAll(resp.body) + raw, err := io.ReadAll(resp.Body) if err != nil { return network.Inspect{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index 72957d47..8d933619 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -27,6 +27,6 @@ func (cli *Client) NetworkList(ctx context.Context, options network.ListOptions) if err != nil { return networkResources, err } - err = json.NewDecoder(resp.body).Decode(&networkResources) + err = json.NewDecoder(resp.Body).Decode(&networkResources) return networkResources, err } diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index 708cc61a..7835fe90 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,25 +11,24 @@ import ( // NetworksPrune requests the daemon to delete unused networks func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (network.PruneReport, error) { - var report network.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { - return report, err + return network.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return network.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return network.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving network prune report: %v", err) + var report network.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return network.PruneReport{}, fmt.Errorf("Error retrieving network prune report: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index 9d6c6cef..9b164d3e 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -1,9 +1,13 @@ -package client // import "github.com/docker/docker/client" +package client import "context" // NetworkRemove removes an existent network from the docker host. 
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + networkID, err := trimID("network", networkID) + if err != nil { + return err + } resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) defer ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index 95ab9b1b..dd1f1f8a 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -11,16 +11,17 @@ import ( // NodeInspectWithRaw returns the node information. func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - if nodeID == "" { - return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + nodeID, err := trimID("node", nodeID) + if err != nil { + return swarm.Node{}, nil, err } - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Node{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Node{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index 1a9e6bfb..3b393ffe 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -1,17 +1,16 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { +func (cli *Client) NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) { query := url.Values{} if options.Filters.Len() > 0 { @@ -30,6 +29,6 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) } var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) + err = json.NewDecoder(resp.Body).Decode(&nodes) return nodes, err } diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index e44436de..644fe138 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -1,14 +1,19 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" ) // NodeRemove removes a Node. 
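// Editor's note (illustration, not part of the upstream patch): the node option
// types moved from the types package to the swarm package, so callers update only
// the import, not the call shape. A hypothetical call, assuming an initialized
// *client.Client named cli:
//
//	err := cli.NodeRemove(ctx, nodeID, swarm.NodeRemoveOptions{Force: true})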
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error { + nodeID, err := trimID("node", nodeID) + if err != nil { + return err + } + query := url.Values{} if options.Force { query.Set("force", "1") diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index 0d0fc3b7..62af964c 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,11 @@ import ( // NodeUpdate updates a Node. func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + nodeID, err := trimID("node", nodeID) + if err != nil { + return err + } + query := url.Values{} query.Set("version", version.String()) resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index ddb0ca39..6f68fc2b 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -6,11 +6,13 @@ import ( "net/http" "os" "path/filepath" + "strings" "time" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel/trace" ) @@ -194,8 +196,8 @@ func WithTLSClientConfigFromEnv() Opt { // (see [WithAPIVersionNegotiation]). func WithVersion(version string) Opt { return func(c *Client) error { - if version != "" { - c.version = version + if v := strings.TrimPrefix(version, "v"); v != "" { + c.version = v c.manualOverride = true } return nil @@ -226,8 +228,13 @@ func WithAPIVersionNegotiation() Opt { // WithTraceProvider sets the trace provider for the client. // If this is not set then the global trace provider will be used. func WithTraceProvider(provider trace.TracerProvider) Opt { + return WithTraceOptions(otelhttp.WithTracerProvider(provider)) +} + +// WithTraceOptions sets tracing span options for the client. +func WithTraceOptions(opts ...otelhttp.Option) Opt { return func(c *Client) error { - c.tp = provider + c.traceOpts = append(c.traceOpts, opts...) 
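// Editor's note (illustration, not part of the upstream patch): because the
// options are appended here, repeated WithTraceOptions calls accumulate otelhttp
// options rather than replacing earlier ones; WithTraceProvider above is now a
// thin wrapper over this same path.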
return nil } } diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index bf3e9b1c..385fdf05 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,8 +7,8 @@ import ( "strings" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/errdefs" ) // Ping pings the server and returns the value of the "Docker-Experimental", @@ -28,49 +28,54 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { if err != nil { return ping, err } - serverResp, err := cli.doRequest(req) - if err == nil { - defer ensureReaderClosed(serverResp) - switch serverResp.statusCode { + resp, err := cli.doRequest(req) + if err != nil { + if IsErrConnectionFailed(err) { + return ping, err + } + // We managed to connect, but got some error; continue and try GET request. + } else { + defer ensureReaderClosed(resp) + switch resp.StatusCode { case http.StatusOK, http.StatusInternalServerError: // Server handled the request, so parse the response - return parsePingResponse(cli, serverResp) + return parsePingResponse(cli, resp) } - } else if IsErrConnectionFailed(err) { - return ping, err } // HEAD failed; fallback to GET. req.Method = http.MethodGet - serverResp, err = cli.doRequest(req) - defer ensureReaderClosed(serverResp) + resp, err = cli.doRequest(req) + defer ensureReaderClosed(resp) if err != nil { return ping, err } - return parsePingResponse(cli, serverResp) + return parsePingResponse(cli, resp) } -func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { +func parsePingResponse(cli *Client, resp *http.Response) (types.Ping, error) { + if resp == nil { + return types.Ping{}, nil + } + var ping types.Ping - if resp.header == nil { - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) + if resp.Header == nil { + return ping, cli.checkResponseErr(resp) } - ping.APIVersion = resp.header.Get("API-Version") - ping.OSType = resp.header.Get("OSType") - if resp.header.Get("Docker-Experimental") == "true" { + ping.APIVersion = resp.Header.Get("Api-Version") + ping.OSType = resp.Header.Get("Ostype") + if resp.Header.Get("Docker-Experimental") == "true" { ping.Experimental = true } - if bv := resp.header.Get("Builder-Version"); bv != "" { - ping.BuilderVersion = types.BuilderVersion(bv) + if bv := resp.Header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = build.BuilderVersion(bv) } - if si := resp.header.Get("Swarm"); si != "" { + if si := resp.Header.Get("Swarm"); si != "" { state, role, _ := strings.Cut(si, "/") ping.SwarmStatus = &swarm.Status{ NodeState: swarm.LocalNodeState(state), ControlAvailable: role == "manager", } } - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) + return ping, cli.checkResponseErr(resp) } diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go index b95dbaf6..eaba7ee6 100644 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go 
b/vendor/github.com/docker/docker/client/plugin_disable.go index 01f6574f..4049b1b6 100644 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,10 @@ import ( // PluginDisable disables a plugin func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } query := url.Values{} if options.Force { query.Set("force", "1") diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go index 736da48b..61185693 100644 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,6 +10,10 @@ import ( // PluginEnable enables a plugin func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } query := url.Values{} query.Set("timeout", strconv.Itoa(options.Timeout)) diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index f09e4606..eaedeb8a 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -11,8 +11,9 @@ import ( // PluginInspectWithRaw inspects an existing plugin func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - if name == "" { - return nil, nil, objectNotFoundError{object: "plugin", id: name} + name, err := trimID("plugin", name) + if err != nil { + return nil, nil, err } resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) defer ensureReaderClosed(resp) @@ -20,7 +21,7 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type return nil, nil, err } - body, err := io.ReadAll(resp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index a0d8c350..5fd2ff21 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,15 +7,15 @@ import ( "net/http" "net/url" + cerrdefs "github.com/containerd/errdefs" "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (_ io.ReadCloser, retErr error) { query := url.Values{} if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { return nil, errors.Wrap(err, "invalid remote 
reference") @@ -35,46 +35,46 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types return nil, err } - name = resp.header.Get("Docker-Plugin-Name") + name = resp.Header.Get("Docker-Plugin-Name") pr, pw := io.Pipe() go func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.body) + _, err := io.Copy(pw, resp.Body) if err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return } defer func() { - if err != nil { + if retErr != nil { delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) ensureReaderClosed(delResp) } }() if len(options.Args) > 0 { if err := cli.PluginSet(ctx, name, options.Args); err != nil { - pw.CloseWithError(err) + _ = pw.CloseWithError(err) return } } if options.Disabled { - pw.Close() + _ = pw.Close() return } enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(enableErr) + _ = pw.CloseWithError(enableErr) }() return pr, nil } -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { return cli.get(ctx, "/plugins/privileges", query, http.Header{ registry.AuthHeader: {registryAuth}, }) } -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (*http.Response, error) { return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ registry.AuthHeader: {registryAuth}, }) @@ -82,7 +82,7 @@ func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileg func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { // todo: do inspect before to check existing name before checking privileges newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { @@ -98,7 +98,7 @@ func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, } var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + if err := json.NewDecoder(resp.Body).Decode(&privileges); err != nil { ensureReaderClosed(resp) return nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index 2091a054..f314e17f 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -28,6 +28,6 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P return plugins, err } - err = json.NewDecoder(resp.body).Decode(&plugins) + err = json.NewDecoder(resp.Body).Decode(&plugins) return plugins, err } diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 8f68a86e..4574dcdd 100644 --- 
a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -10,11 +10,15 @@ import ( // PluginPush pushes a plugin to a registry func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + name, err := trimID("plugin", name) + if err != nil { + return nil, err + } resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ registry.AuthHeader: {registryAuth}, }) if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index 4cd66958..2ba0a8cc 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,11 @@ import ( // PluginRemove removes a plugin func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } + query := url.Values{} if options.Force { query.Set("force", "1") diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go index dcf5752c..f0e4a0c3 100644 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,6 +6,11 @@ import ( // PluginSet modifies settings for an existing plugin func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + name, err := trimID("plugin", name) + if err != nil { + return err + } + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 5cade450..cd0cf4d2 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -13,7 +13,12 @@ import ( ) // PluginUpgrade upgrades a plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) { + name, err := trimID("plugin", name) + if err != nil { + return nil, err + } + if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil { return nil, err } @@ -32,10 +37,10 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } -func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (*http.Response, error) { return cli.post(ctx, 
"/plugins/"+name+"/upgrade", query, privileges, http.Header{ registry.AuthHeader: {registryAuth}, }) diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 6eea9b4e..254138fc 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -15,51 +15,42 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int - reqURL *url.URL -} - // head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) } // get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) } // post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) { body, headers, err := encodeBody(obj, headers) if err != nil { - return serverResponse{}, err + return nil, err } return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) { body, headers, err := encodeBody(obj, headers) if err != nil { - return serverResponse{}, err + return nil, err } return cli.putRaw(ctx, path, query, body, headers) } // putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { // PUT requests are expected to always have a body (apparently) // so explicitly pass an empty body to sendRequest to signal that // it should set the Content-Type header if not already present. 
@@ -70,7 +61,7 @@ func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, bo } // delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) } @@ -116,59 +107,58 @@ func (cli *Client) buildRequest(ctx context.Context, method, path string, body i return req, nil } -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { - return serverResponse{}, err + return nil, err } resp, err := cli.doRequest(req) switch { - case errors.Is(err, context.Canceled): - return serverResponse{}, errdefs.Cancelled(err) - case errors.Is(err, context.DeadlineExceeded): - return serverResponse{}, errdefs.Deadline(err) + case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): + return nil, err case err == nil: - err = cli.checkResponseErr(resp) + return resp, cli.checkResponseErr(resp) + default: + return resp, err } - return resp, errdefs.FromStatusCode(err, resp.statusCode) } -// FIXME(thaJeztah): Should this actually return a serverResp when a connection error occurred? -func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - +func (cli *Client) doRequest(req *http.Request) (*http.Response, error) { resp, err := cli.client.Do(req) if err != nil { if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} + return nil, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} } if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")} + return nil, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")} } // Don't decorate context sentinel errors; users may be comparing to // them directly. 
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return serverResp, err + return nil, err } - if uErr, ok := err.(*url.Error); ok { - if nErr, ok := uErr.Err.(*net.OpError); ok { + var uErr *url.Error + if errors.As(err, &uErr) { + var nErr *net.OpError + if errors.As(uErr.Err, &nErr) { if os.IsPermission(nErr.Err) { - return serverResp, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)} + return nil, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)} } } } - if nErr, ok := err.(net.Error); ok { + var nErr net.Error + if errors.As(err, &nErr) { // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)? if nErr.Timeout() { - return serverResp, ErrorConnectionFailed(cli.host) + return nil, connectionFailed(cli.host) } if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { - return serverResp, ErrorConnectionFailed(cli.host) + return nil, connectionFailed(cli.host) } } @@ -192,28 +182,37 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { } } - return serverResp, errConnectionFailed{errors.Wrap(err, "error during connect")} + return nil, errConnectionFailed{errors.Wrap(err, "error during connect")} } - if resp != nil { - serverResp.statusCode = resp.StatusCode - serverResp.body = resp.Body - serverResp.header = resp.Header - } - return serverResp, nil + return resp, nil } -func (cli *Client) checkResponseErr(serverResp serverResponse) error { - if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { +func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { + if serverResp == nil { return nil } + if serverResp.StatusCode >= http.StatusOK && serverResp.StatusCode < http.StatusBadRequest { + return nil + } + defer func() { + retErr = httpErrorFromStatusCode(retErr, serverResp.StatusCode) + }() var body []byte var err error - if serverResp.body != nil { + var reqURL string + if serverResp.Request != nil { + reqURL = serverResp.Request.URL.String() + } + statusMsg := serverResp.Status + if statusMsg == "" { + statusMsg = http.StatusText(serverResp.StatusCode) + } + if serverResp.Body != nil { bodyMax := 1 * 1024 * 1024 // 1 MiB bodyR := &io.LimitedReader{ - R: serverResp.body, + R: serverResp.Body, N: int64(bodyMax), } body, err = io.ReadAll(bodyR) @@ -221,21 +220,54 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { return err } if bodyR.N == 0 { - return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) + if reqURL != "" { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", statusMsg, bodyMax, reqURL) + } + return fmt.Errorf("request returned %s with a message (> %d bytes); check if the server supports the requested API version", statusMsg, bodyMax) } } if len(body) == 0 { - return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + if reqURL != "" { + return fmt.Errorf("request returned %s for API route and version %s, check if the 
server supports the requested API version", statusMsg, reqURL) + } + return fmt.Errorf("request returned %s; check if the server supports the requested API version", statusMsg) } var daemonErr error - if serverResp.header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { + if serverResp.Header.Get("Content-Type") == "application/json" { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { return errors.Wrap(err, "Error reading JSON") } - daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) + if errorResponse.Message == "" { + // Error-message is empty, which means that we successfully parsed the + // JSON-response (no error produced), but it didn't contain an error + // message. This could either be because the response was empty, or + // the response was valid JSON, but not with the expected schema + // ([types.ErrorResponse]). + // + // We cannot use "strict" JSON handling (json.NewDecoder with DisallowUnknownFields) + // due to the API using an open schema (we must anticipate fields + // being added to [types.ErrorResponse] in the future, and not + // reject those responses). + // + // For these cases, we construct an error with the status-code + // returned, but we could consider returning (a truncated version + // of) the actual response as-is. + // + // TODO(thaJeztah): consider adding a log.Debug to allow clients to debug the actual response when enabling debug logging. + daemonErr = fmt.Errorf(`API returned a %d (%s) but provided no error-message`, + serverResp.StatusCode, + http.StatusText(serverResp.StatusCode), + ) + } else { + daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) + } } else { + // Fall back to returning the response as-is for API versions < 1.24 + // that didn't support JSON error responses, and for situations + // where a plain text error is returned. This branch may also catch + // situations where a proxy is involved, returning an HTML response. daemonErr = errors.New(strings.TrimSpace(string(body))) } return errors.Wrap(daemonErr, "Error response from daemon") @@ -275,10 +307,16 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { return params, nil } -func ensureReaderClosed(response serverResponse) { - if response.body != nil { +func ensureReaderClosed(response *http.Response) { + if response != nil && response.Body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - _, _ = io.CopyN(io.Discard, response.body, 512) - _ = response.body.Close() + // see https://github.com/google/go-github/pull/317/files#r57536827 + // + // TODO(thaJeztah): see if this optimization is still needed, or already implemented in stdlib, + // and check if context-cancellation should handle this as well. If still needed, consider + // wrapping response.Body, or returning a "closer()" from [Client.sendRequest] and related + // methods. 
+ _, _ = io.CopyN(io.Discard, response.Body, 512) + _ = response.Body.Close() } } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index 7b7f1ba7..be4a1da4 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -1,25 +1,24 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" ) // SecretCreate creates a new secret. -func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var response types.SecretCreateResponse +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) { if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { - return response, err + return swarm.SecretCreateResponse{}, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) defer ensureReaderClosed(resp) if err != nil { - return response, err + return swarm.SecretCreateResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + var response swarm.SecretCreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index a9cb5988..f44c00e7 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -11,11 +11,12 @@ import ( // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { + id, err := trimID("secret", id) + if err != nil { return swarm.Secret{}, nil, err } - if id == "" { - return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) @@ -23,7 +24,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S return swarm.Secret{}, nil, err } - body, err := io.ReadAll(resp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Secret{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index 4d21639e..2e37bda2 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -1,17 +1,16 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // SecretList returns the list of secrets. 
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { +func (cli *Client) SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) { if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil { return nil, err } @@ -33,6 +32,6 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio } var secrets []swarm.Secret - err = json.NewDecoder(resp.body).Decode(&secrets) + err = json.NewDecoder(resp.Body).Decode(&secrets) return secrets, err } diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index 079ed673..d1044aaf 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -1,9 +1,13 @@ -package client // import "github.com/docker/docker/client" +package client import "context" // SecretRemove removes a secret. func (cli *Client) SecretRemove(ctx context.Context, id string) error { + id, err := trimID("secret", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 9dfe6719..a0aff7cb 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,10 @@ import ( // SecretUpdate attempts to update a secret. func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + id, err := trimID("secret", id) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index b72cb420..db7566a8 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -8,7 +8,6 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" @@ -17,7 +16,7 @@ import ( ) // ServiceCreate creates a new service. 
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { var response swarm.ServiceCreateResponse // Make sure we negotiated (if the client is configured to do so), @@ -37,6 +36,11 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, if err := validateServiceSpec(service); err != nil { return response, err } + if versions.LessThan(cli.version, "1.30") { + if err := validateAPIVersion(service, cli.version); err != nil { + return response, err + } + } // ensure that the image is tagged var resolveWarning string @@ -73,7 +77,7 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, return response, err } - err = json.NewDecoder(resp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) if resolveWarning != "" { response.Warnings = append(response.Warnings, resolveWarning) } @@ -191,3 +195,18 @@ func validateServiceSpec(s swarm.ServiceSpec) error { } return nil } + +func validateAPIVersion(c swarm.ServiceSpec, apiVersion string) error { + for _, m := range c.TaskTemplate.ContainerSpec.Mounts { + if m.BindOptions != nil { + if m.BindOptions.NonRecursive && versions.LessThan(apiVersion, "1.40") { + return errors.Errorf("bind-recursive=disabled requires API v1.40 or later") + } + // ReadOnlyNonRecursive can be safely ignored when API < 1.44 + if m.BindOptions.ReadOnlyForceRecursive && versions.LessThan(apiVersion, "1.44") { + return errors.Errorf("bind-recursive=readonly requires API v1.44 or later") + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index cee020c9..cb25ade1 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -8,24 +8,25 @@ import ( "io" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" ) // ServiceInspectWithRaw returns the service information and the raw data. 
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { - if serviceID == "" { - return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts swarm.ServiceInspectOptions) (swarm.Service, []byte, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return swarm.Service{}, nil, err } + query := url.Values{} query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Service{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Service{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index f97ec75a..26b25ff0 100644 --- a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -1,17 +1,16 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { +func (cli *Client) ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) { query := url.Values{} if options.Filters.Len() > 0 { @@ -34,6 +33,6 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt } var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) + err = json.NewDecoder(resp.Body).Decode(&services) return services, err } diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index e9e30a2a..8bf04082 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -14,6 +14,11 @@ import ( // ServiceLogs returns the logs generated by a service in an io.ReadCloser. // It's up to the caller to close the stream. 
func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return nil, err + } + query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") @@ -48,5 +53,5 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options co if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index 2c46326e..0c7cc571 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -1,9 +1,14 @@ -package client // import "github.com/docker/docker/client" +package client import "context" // ServiceRemove kills and removes a service. func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + serviceID, err := trimID("service", serviceID) + if err != nil { + return err + } + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) defer ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index d2f03f02..278e305d 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,7 +6,6 @@ import ( "net/http" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" @@ -15,8 +14,11 @@ import ( // ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. // It should be the value as set *before* the update. You can find this value in the Meta field // of swarm.Service, which can be found using ServiceInspectWithRaw. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { - response := swarm.ServiceUpdateResponse{} +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return swarm.ServiceUpdateResponse{}, err + } // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. @@ -24,7 +26,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version // Normally, version-negotiation (if enabled) would not happen until // the API request is made. 
if err := cli.checkVersion(ctx); err != nil { - return response, err + return swarm.ServiceUpdateResponse{}, err } query := url.Values{} @@ -39,7 +41,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version query.Set("version", version.String()) if err := validateServiceSpec(service); err != nil { - return response, err + return swarm.ServiceUpdateResponse{}, err } // ensure that the image is tagged @@ -74,10 +76,11 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) defer ensureReaderClosed(resp) if err != nil { - return response, err + return swarm.ServiceUpdateResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&response) + var response swarm.ServiceUpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) if resolveWarning != "" { response.Warnings = append(response.Warnings, resolveWarning) } diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index 19f59dd5..41151f6c 100644 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -1,21 +1,21 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" ) // SwarmGetUnlockKey retrieves the swarm's unlock key. -func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - defer ensureReaderClosed(serverResp) +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) { + resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return types.SwarmUnlockKeyResponse{}, err + return swarm.UnlockKeyResponse{}, err } - var response types.SwarmUnlockKeyResponse - err = json.NewDecoder(serverResp.body).Decode(&response) + var response swarm.UnlockKeyResponse + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go index da3c1637..7f291654 100644 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,13 +9,13 @@ import ( // SwarmInit initializes the swarm. 
func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(resp) if err != nil { return "", err } var response string - err = json.NewDecoder(serverResp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go index b52b67a8..597693bd 100644 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,13 +9,13 @@ import ( // SwarmInspect inspects the swarm. func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Swarm{}, err } var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) + err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go index a1cf0455..446d4d04 100644 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go index 90ca84b3..709e5adb 100644 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go index d2412f7d..e3c756b6 100644 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -8,7 +8,7 @@ import ( // SwarmUnlock unlocks locked swarm. 
func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index 9fde7d75..309ab194 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index dde1f6c5..ca3924fc 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -11,16 +11,18 @@ import ( // TaskInspectWithRaw returns the task information and its raw representation. func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - if taskID == "" { - return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + taskID, err := trimID("task", taskID) + if err != nil { + return swarm.Task{}, nil, err } - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - defer ensureReaderClosed(serverResp) + + resp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Task{}, nil, err } - body, err := io.ReadAll(serverResp.body) + body, err := io.ReadAll(resp.Body) if err != nil { return swarm.Task{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go index 4869b444..de743e99 100644 --- a/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -1,17 +1,16 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // TaskList returns the list of tasks. 
-func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { +func (cli *Client) TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) { query := url.Values{} if options.Filters.Len() > 0 { @@ -30,6 +29,6 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) } var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) + err = json.NewDecoder(resp.Body).Decode(&tasks) return tasks, err } diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go index b8c20e71..baa55528 100644 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -47,5 +47,5 @@ func (cli *Client) TaskLogs(ctx context.Context, taskID string, options containe if err != nil { return nil, err } - return resp.body, nil + return resp.Body, nil } diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go index 7f3ff44e..67e1e693 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/docker/docker/client/utils.go @@ -1,13 +1,35 @@ -package client // import "github.com/docker/docker/client" +package client import ( + "encoding/json" + "fmt" "net/url" - "regexp" + "strings" + cerrdefs "github.com/containerd/errdefs" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/internal/lazyregexp" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) +var headerRegexp = lazyregexp.New(`\ADocker/.+\s\((.+)\)\z`) + +type emptyIDError string + +func (e emptyIDError) InvalidParameter() {} + +func (e emptyIDError) Error() string { + return "invalid " + string(e) + " name or ID: value is empty" +} + +// trimID trims the given object-ID / name, returning an error if it's empty. +func trimID(objType, id string) (string, error) { + id = strings.TrimSpace(id) + if id == "" { + return "", emptyIDError(objType) + } + return id, nil +} // getDockerOS returns the operating system based on the server header from the daemon. func getDockerOS(serverHeader string) string { @@ -32,3 +54,43 @@ func getFiltersQuery(f filters.Args) (url.Values, error) { } return query, nil } + +// encodePlatforms marshals the given platform(s) to JSON format, to +// be used for query-parameters for filtering / selecting platforms. +func encodePlatforms(platform ...ocispec.Platform) ([]string, error) { + if len(platform) == 0 { + return []string{}, nil + } + if len(platform) == 1 { + p, err := encodePlatform(&platform[0]) + if err != nil { + return nil, err + } + return []string{p}, nil + } + + seen := make(map[string]struct{}, len(platform)) + out := make([]string, 0, len(platform)) + for i := range platform { + p, err := encodePlatform(&platform[i]) + if err != nil { + return nil, err + } + if _, ok := seen[p]; !ok { + out = append(out, p) + seen[p] = struct{}{} + } + } + return out, nil +} + +// encodePlatform marshals the given platform to JSON format, to +// be used for query-parameters for filtering / selecting platforms. 
It +// is used as a helper for encodePlatforms, +func encodePlatform(platform *ocispec.Platform) (string, error) { + p, err := json.Marshal(platform) + if err != nil { + return "", fmt.Errorf("%w: invalid platform: %v", cerrdefs.ErrInvalidArgument, err) + } + return string(p), nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go index 8f17ff4e..046af16c 100644 --- a/vendor/github.com/docker/docker/client/version.go +++ b/vendor/github.com/docker/docker/client/version.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -16,6 +16,6 @@ func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { } var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) + err = json.NewDecoder(resp.Body).Decode(&server) return server, err } diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index b3b18243..1aad3f47 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,12 +9,13 @@ import ( // VolumeCreate creates a volume in the docker host. func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { - var vol volume.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { - return vol, err + return volume.Volume{}, err } - err = json.NewDecoder(resp.body).Decode(&vol) + + var vol volume.Volume + err = json.NewDecoder(resp.Body).Decode(&vol) return vol, err } diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index b3ba4e60..389a4a71 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" @@ -17,21 +17,23 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.V // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { - if volumeID == "" { - return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + volumeID, err := trimID("volume", volumeID) + if err != nil { + return volume.Volume{}, nil, err } - var vol volume.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return vol, nil, err + return volume.Volume{}, nil, err } - body, err := io.ReadAll(resp.body) + body, err := io.ReadAll(resp.Body) if err != nil { - return vol, nil, err + return volume.Volume{}, nil, err } + + var vol volume.Volume rdr := bytes.NewReader(body) err = json.NewDecoder(rdr).Decode(&vol) return vol, body, err diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index d5ea9827..61ed518c 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" 
+package client import ( "context" @@ -11,23 +11,23 @@ import ( // VolumeList returns the volumes configured in the docker host. func (cli *Client) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) { - var volumes volume.ListResponse query := url.Values{} if options.Filters.Len() > 0 { //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { - return volumes, err + return volume.ListResponse{}, err } query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/volumes", query, nil) defer ensureReaderClosed(resp) if err != nil { - return volumes, err + return volume.ListResponse{}, err } - err = json.NewDecoder(resp.body).Decode(&volumes) + var volumes volume.ListResponse + err = json.NewDecoder(resp.Body).Decode(&volumes) return volumes, err } diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 9b09c30f..e22f0072 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,25 +11,24 @@ import ( // VolumesPrune requests the daemon to delete unused data func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (volume.PruneReport, error) { - var report volume.PruneReport - if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { - return report, err + return volume.PruneReport{}, err } query, err := getFiltersQuery(pruneFilters) if err != nil { - return report, err + return volume.PruneReport{}, err } - serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) + resp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(resp) if err != nil { - return report, err + return volume.PruneReport{}, err } - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + var report volume.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return volume.PruneReport{}, fmt.Errorf("Error retrieving volume prune report: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index b8bdc5ae..e2a53fa9 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -9,6 +9,11 @@ import ( // VolumeRemove removes a volume from the docker host. 
func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return err + } + query := url.Values{} if force { // Make sure we negotiated (if the client is configured to do so), diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go index 151863f0..879932f0 100644 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,6 +11,10 @@ import ( // VolumeUpdate updates a volume. This only works for Cluster Volumes, and // only some fields can be updated. func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return err + } if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index a5523c3e..00000000 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. 
-type ErrDataLoss interface { - DataLoss() -} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174..00000000 --- a/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). -package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index 042de4b7..00000000 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,279 +0,0 @@ -package errdefs - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e 
errForbidden) Unwrap() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e errNotModified) Unwrap() error { - return e.error -} - -// NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented is a helper to create an error of the class with the same name from any error type -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git 
a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index ebcd7893..00000000 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,46 +0,0 @@ -package errdefs - -import ( - "net/http" -) - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return nil - } - switch statusCode { - case http.StatusNotFound: - err = NotFound(err) - case http.StatusBadRequest: - err = InvalidParameter(err) - case http.StatusConflict: - err = Conflict(err) - case http.StatusUnauthorized: - err = Unauthorized(err) - case http.StatusServiceUnavailable: - err = Unavailable(err) - case http.StatusForbidden: - err = Forbidden(err) - case http.StatusNotModified: - err = NotModified(err) - case http.StatusNotImplemented: - err = NotImplemented(err) - case http.StatusInternalServerError: - if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { - err = System(err) - } - default: - switch { - case statusCode >= 200 && statusCode < 400: - // it's a client error - case statusCode >= 400 && statusCode < 500: - err = InvalidParameter(err) - case statusCode >= 500 && statusCode < 600: - err = System(err) - default: - err = Unknown(err) - } - } - return err -} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index f94034cb..00000000 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,123 +0,0 @@ -package errdefs - -import ( - "context" - "errors" -) - -type causer interface { - Cause() error -} - -type wrapErr interface { - Unwrap() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - case wrapErr: - return getImplementer(e.Unwrap()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an ErrNotFound -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an ErrConflict -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an ErrUnauthorized -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an ErrUnavailable -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in error is an ErrForbidden -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an ErrSystem -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is a NotModified error -func IsNotModified(err error) bool { - _, ok := 
getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an ErrNotImplemented -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an ErrUnknown -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an ErrCancelled -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an ErrDeadline -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an ErrDataLoss -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} - -// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. -func IsContext(err error) bool { - return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go new file mode 100644 index 00000000..6334edb6 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go @@ -0,0 +1,90 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code below was largely copied from golang.org/x/mod@v0.22; +// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go +// with some additional methods added. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be +// compiled the first time it is needed. +type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string { + return r.re().FindAllStringSubmatch(s, n) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { + return r.re().ReplaceAllStringFunc(src, repl) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. 
If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go index cf4d6a59..e899f4de 100644 --- a/vendor/github.com/docker/docker/internal/multierror/multierror.go +++ b/vendor/github.com/docker/docker/internal/multierror/multierror.go @@ -36,7 +36,7 @@ func (e *joinError) Error() string { } stringErrs := make([]string, 0, len(e.errs)) for _, subErr := range e.errs { - stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1)) + stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t")) } return "* " + strings.Join(stringErrs, "\n* ") } diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md index b63e1f8f..4b4805ad 100644 --- a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md +++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md @@ -7,9 +7,3 @@ When submitting code, please make every effort to follow existing conventions and style in order to keep the code as readable as possible. Please also make sure all tests pass by running `go test`, and format your code with `go fmt`. We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -Individual Contributor License Agreement. We use [cla-assistant.io][1] and you -will be prompted to sign once a pull request is opened. - -[1]: https://cla-assistant.io/ diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md index 79a7c5ec..02b57495 100644 --- a/vendor/github.com/go-jose/go-jose/v4/README.md +++ b/vendor/github.com/go-jose/go-jose/v4/README.md @@ -9,14 +9,6 @@ Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, JSON Web Signature, and JSON Web Token standards. -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - ## Overview The implementation follows the @@ -109,6 +101,6 @@ allows attaching a key id. Examples can be found in the Godoc reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util) subdirectory also contains a small command-line utility which might be useful as an example as well. 
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go index 89f03ee3..9f1322dc 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go @@ -288,10 +288,11 @@ func ParseEncryptedCompact( keyAlgorithms []KeyAlgorithm, contentEncryption []ContentEncryption, ) (*JSONWebEncryption, error) { - parts := strings.Split(input, ".") - if len(parts) != 5 { + // Five parts is four separators + if strings.Count(input, ".") != 4 { return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") } + parts := strings.SplitN(input, ".", 5) rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index 8a528421..9e57e93b 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -239,10 +239,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { keyPub = key } } else { - err = fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) + return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) } default: - err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) + return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) } if err != nil { diff --git a/vendor/github.com/go-jose/go-jose/v4/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go index 3a912301..d09d8ba5 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jws.go +++ b/vendor/github.com/go-jose/go-jose/v4/jws.go @@ -327,10 +327,11 @@ func parseSignedCompact( payload []byte, signatureAlgorithms []SignatureAlgorithm, ) (*JSONWebSignature, error) { - parts := strings.Split(input, ".") - if len(parts) != 3 { + // Three parts is two separators + if strings.Count(input, ".") != 2 { return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") } + parts := strings.SplitN(input, ".", 3) if parts[1] != "" && payload != nil { return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 0cffafa7..0ed62c1a 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -1,26 +1,28 @@ +version: "2" + run: timeout: 1m tests: true linters: - disable-all: true - enable: + default: none + enable: # please keep this alphabetized + - asasalint - asciicheck + - copyloopvar + - dupl - errcheck - forcetypeassert + - goconst - gocritic - - gofmt - - goimports - - gosimple - govet - ineffassign - misspell + - musttag - revive - staticcheck - - typecheck - unused issues: - exclude-use-default: false max-issues-per-linter: 0 max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 30568e76..b22c57d7 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { write: fn, } // For skipping fnlogger.Info and fnlogger.Error. 
- l.Formatter.AddCallDepth(1) + l.AddCallDepth(1) // via Formatter return l } @@ -164,17 +164,17 @@ type fnlogger struct { } func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) + l.AddName(name) // via Formatter return &l } func (l fnlogger) WithValues(kvList ...any) logr.LogSink { - l.Formatter.AddValues(kvList) + l.AddValues(kvList) // via Formatter return &l } func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) + l.AddCallDepth(depth) // via Formatter return &l } diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml deleted file mode 100644 index 841c4281..00000000 --- a/vendor/github.com/go-openapi/analysis/.codecov.yml +++ /dev/null @@ -1,5 +0,0 @@ -coverage: - status: - patch: - default: - target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes deleted file mode 100644 index d020be8e..00000000 --- a/vendor/github.com/go-openapi/analysis/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go text eol=lf - diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore deleted file mode 100644 index 87c3bd3e..00000000 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -coverage.txt -*.cov -.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml deleted file mode 100644 index 22f8d21c..00000000 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ /dev/null @@ -1,61 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index e005d4b3..00000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# OpenAPI analysis [![Build Status](https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. - -## What's inside? - -* An analyzer providing methods to walk the functional content of a specification -* A spec flattener producing a self-contained document bundle, while preserving `$ref`s -* A spec merger ("mixin") to merge several spec documents into a primary spec -* A spec "fixer" ensuring that response descriptions are non empty - -[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis) - -## FAQ - -* Does this library support OpenAPI 3? - -> No. -> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). -> There is no plan to make it evolve toward supporting OpenAPI 3.x. -> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index c17aee1b..00000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,1064 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - headerItems map[string]spec.Ref - parameterItems map[string]spec.Ref - allRefs map[string]spec.Ref - pathItems map[string]spec.Ref -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) - if location == "header" { - // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas - // and $ref are not supported here. However it is possible to analyze this. - r.headerItems["#"+key] = items.Ref - } else { - r.parameterItems["#"+key] = items.Ref - } -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { - r.pathItems["#"+key] = pathItem.Ref - r.addRef(key, pathItem.Ref) -} - -type patternAnalysis struct { - parameters map[string]string - headers map[string]string - items map[string]string - schemas map[string]string - allPatterns map[string]string -} - -func (p *patternAnalysis) addPattern(key, pattern string) { - p.allPatterns["#"+key] = pattern -} - -func (p *patternAnalysis) addParameterPattern(key, pattern string) { - p.parameters["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addHeaderPattern(key, pattern string) { - p.headers["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addItemsPattern(key, pattern string) { - p.items["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addSchemaPattern(key, pattern string) { - p.schemas["#"+key] = pattern - p.addPattern(key, pattern) -} - -type enumAnalysis struct { - parameters map[string][]interface{} - headers map[string][]interface{} - items map[string][]interface{} - schemas map[string][]interface{} - allEnums map[string][]interface{} -} - -func (p *enumAnalysis) addEnum(key string, enum []interface{}) { - p.allEnums["#"+key] = enum -} - -func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { - p.parameters["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { - p.headers["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { - p.items["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { - p.schemas["#"+key] = enum - p.addEnum(key, enum) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. 
-func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - a.reset() - a.initialize() - - return a -} - -// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec. -type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - patterns patternAnalysis - enums enumAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) reset() { - s.consumes = make(map[string]struct{}, 150) - s.produces = make(map[string]struct{}, 150) - s.authSchemes = make(map[string]struct{}, 150) - s.operations = make(map[string]map[string]*spec.Operation, 150) - s.allSchemas = make(map[string]SchemaRef, 150) - s.allOfs = make(map[string]SchemaRef, 150) - s.references.schemas = make(map[string]spec.Ref, 150) - s.references.pathItems = make(map[string]spec.Ref, 150) - s.references.responses = make(map[string]spec.Ref, 150) - s.references.parameters = make(map[string]spec.Ref, 150) - s.references.items = make(map[string]spec.Ref, 150) - s.references.headerItems = make(map[string]spec.Ref, 150) - s.references.parameterItems = make(map[string]spec.Ref, 150) - s.references.allRefs = make(map[string]spec.Ref, 150) - s.patterns.parameters = make(map[string]string, 150) - s.patterns.headers = make(map[string]string, 150) - s.patterns.items = make(map[string]string, 150) - s.patterns.schemas = make(map[string]string, 150) - s.patterns.allPatterns = make(map[string]string, 150) - s.enums.parameters = make(map[string][]interface{}, 150) - s.enums.headers = make(map[string][]interface{}, 150) - s.enums.items = make(map[string][]interface{}, 150) - s.enums.schemas = make(map[string][]interface{}, 150) - s.enums.allEnums = make(map[string][]interface{}, 150) -} - -func (s *Spec) reload() { - s.reset() - s.initialize() -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) //#nosec - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref, "parameter") - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", parameter.Schema, refPref) - } - if parameter.Pattern != "" { - s.patterns.addParameterPattern(refPref, parameter.Pattern) - } - if len(parameter.Enum) > 0 { - s.enums.addParameterEnum(refPref, parameter.Enum) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for k, v := range response.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - if v.Items != nil { - s.analyzeItems("items", v.Items, hRefPref, "header") - } - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", response.Schema, refPref) - } - 
} - - for name := range s.spec.Definitions { - schema := s.spec.Definitions[name] - s.analyzeSchema(name, &schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: resolve refs here? - // Currently, operations declared via pathItem $ref are known only after expansion - op := pi - if pi.Ref.String() != "" { - key := slashpath.Join("/paths", jsonpointer.Escape(path)) - s.references.addPathItemRef(key, pi) - } - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) //#nosec - } - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref, "parameter") - } - if param.Schema != nil { - s.analyzeSchema("schema", param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref, location) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items, location) - } - if items.Pattern != "" { - s.patterns.addItemsPattern(refPref, items.Pattern) - } - if len(items.Enum) > 0 { - s.enums.addItemsEnum(refPref, items.Enum) - } -} - -func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) //#nosec - } - - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - - s.analyzeItems("items", param.Items, refPref, "parameter") - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", param.Schema, refPref) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - s.analyzeParameter(prefix, i, param) - } - - if op.Responses == nil { - return - } - - if op.Responses.Default != nil { - s.analyzeDefaultResponse(prefix, op.Responses.Default) - } - - for k, res := range op.Responses.StatusCodeResponses { - s.analyzeResponse(prefix, k, res) - } -} - -func (s *Spec) analyzeDefaultResponse(prefix 
string, res *spec.Response) { - refPref := slashpath.Join(prefix, "responses", "default") - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, res) - } - - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - } - - if res.Schema != nil { - s.analyzeSchema("schema", res.Schema, refPref) - } -} - -func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) //#nosec - } - - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - - if res.Schema != nil { - s.analyzeSchema("schema", res.Schema, refPref) - } -} - -func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: schema, - Ref: spec.MustCreateRef("#" + refURI), - TopLevel: prefix == "/definitions", - } - - s.allSchemas["#"+refURI] = schRef - - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - - if schema.Pattern != "" { - s.patterns.addSchemaPattern(refURI, schema.Pattern) - } - - if len(schema.Enum) > 0 { - s.enums.addSchemaEnum(refURI, schema.Enum) - } - - for k, v := range schema.Definitions { - v := v - s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) - } - - for k, v := range schema.Properties { - v := v - s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) - } - - for k, v := range schema.PatternProperties { - v := v - // NOTE: swagger 2.0 does not support PatternProperties. - // However it is possible to analyze this in a schema - s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) - } - - for i := range schema.AllOf { - v := &schema.AllOf[i] - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = schRef - } - - for i := range schema.AnyOf { - v := &schema.AnyOf[i] - // NOTE: swagger 2.0 does not support anyOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - - for i := range schema.OneOf { - v := &schema.OneOf[i] - // NOTE: swagger 2.0 does not support oneOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - - if schema.Not != nil { - // NOTE: swagger 2.0 does not support "not" constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema("not", schema.Not, refURI) - } - - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) - } - - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - // NOTE: swagger 2.0 does not support AdditionalItems. 
- // However it is possible to analyze this in a schema - s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) - } - - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", schema.Items.Schema, refURI) - } - - for i := range schema.Items.Schemas { - sch := &schema.Items.Schemas[i] - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - result := [][]SecurityRequirement{} - for _, scheme := range schemes { - if len(scheme) == 0 { - // append a zero object for anonymous - result = append(result, []SecurityRequirement{{}}) - - continue - } - - var reqs []SecurityRequirement - for k, v := range scheme { - if v == nil { - v = []string{} - } - reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) - } - - result = append(result, reqs) - } - - return result -} - -// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { - result := make(map[string]spec.SecurityScheme) - - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - - result := make(map[string]spec.SecurityScheme) - for _, reqs := range requirements { - for _, v := range reqs { - if v.Name == "" { - // optional requirement - continue - } - - if _, ok := result[v.Name]; ok { - // duplicate requirement - continue - } - - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - } - - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) 
string { - return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - // TODO: this should be x-go-name - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - - return swag.ToGoName(param.Name) -} - -// ErrorOnParamFunc is a callback function to be invoked -// whenever an error is encountered while resolving references -// on parameters. -// -// This function takes as input the spec.Parameter which triggered the -// error and the error itself. -// -// If the callback function returns false, the calling function should bail. -// -// If it returns true, the calling function should continue evaluating parameters. -// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). -type ErrorOnParamFunc func(spec.Parameter, error) bool - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { - for _, param := range parameters { - pr := param - if pr.Ref.String() == "" { - res[mapKeyFromParam(&pr)] = pr - - continue - } - - // resolve $ref - if callmeOnError == nil { - callmeOnError = func(_ spec.Parameter, err error) bool { - panic(err) - } - } - - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { - continue - } - - break - } - - objAsParam, ok := obj.(spec.Parameter) - if !ok { - if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { - continue - } - - break - } - - pr = objAsParam - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - return s.SafeParametersFor(operationID, nil) -} - -// SafeParametersFor the specified operation id. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag, callmeOnError) - s.paramsAsMap(op.Parameters, bag, callmeOnError) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - - return res - } - - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) //#nosec - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) //#nosec - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) //#nosec - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) //#nosec - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) //#nosec - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) //#nosec - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) //#nosec - } - } - - return nil -} - -// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - return s.SafeParamsFor(method, path, nil) -} - -// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res, callmeOnError) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) - } - - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - - return op, fn - } - - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - - return result -} - -// OperationMethodPaths gets all the operation ids based on method an dpath -func (s *Spec) OperationMethodPaths() []string { - if len(s.operations) == 0 { - return nil - } - - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p := range v { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return 
s.structMapKeys(s.produces) -} - -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema - TopLevel bool -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - - return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - - return -} - -// AllPathItemReferences returns the references for all the items -func (s *Spec) AllPathItemReferences() (result []string) { - for _, v := range s.references.pathItems { - result = append(result, v.String()) - } - - return -} - -// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). -// -// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid -// Swagger 2.0 spec. 
-func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - - return -} - -// AllReferences returns all the references found in the document, with possible duplicates -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - - return -} - -func cloneStringMap(source map[string]string) map[string]string { - res := make(map[string]string, len(source)) - for k, v := range source { - res[k] = v - } - - return res -} - -func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { - res := make(map[string][]interface{}, len(source)) - for k, v := range source { - res[k] = v - } - - return res -} - -// ParameterPatterns returns all the patterns found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterPatterns() map[string]string { - return cloneStringMap(s.patterns.parameters) -} - -// HeaderPatterns returns all the patterns found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderPatterns() map[string]string { - return cloneStringMap(s.patterns.headers) -} - -// ItemsPatterns returns all the patterns found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsPatterns() map[string]string { - return cloneStringMap(s.patterns.items) -} - -// SchemaPatterns returns all the patterns found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaPatterns() map[string]string { - return cloneStringMap(s.patterns.schemas) -} - -// AllPatterns returns all the patterns found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllPatterns() map[string]string { - return cloneStringMap(s.patterns.allPatterns) -} - -// ParameterEnums returns all the enums found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.parameters) -} - -// HeaderEnums returns all the enums found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.headers) -} - -// ItemsEnums returns all the enums found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.items) -} - -// SchemaEnums returns all the enums found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.schemas) -} - -// AllEnums returns all the enums found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.allEnums) -} diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go deleted file mode 100644 index 33c15704..00000000 --- a/vendor/github.com/go-openapi/analysis/debug.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "os" - - "github.com/go-openapi/analysis/internal/debug" -) - -var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go deleted file mode 100644 index e8d9f9b1..00000000 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package analysis provides methods to work with a Swagger specification document from -package go-openapi/spec. - -## Analyzing a specification - -An analysed specification object (type Spec) provides methods to work with swagger definition. - -## Flattening or expanding a specification - -Flattening a specification bundles all remote $ref in the main spec document. -Depending on flattening options, additional preprocessing may take place: - - full flattening: replacing all inline complex constructs by a named entry in #/definitions - - expand: replace all $ref's in the document by their expanded content - -## Merging several specifications - -Mixin several specifications merges all Swagger constructs, and warns about found conflicts. - -## Fixing a specification - -Unmarshalling a specification with golang json unmarshalling may lead to -some unwanted result on present but empty fields. - -## Analyzing a Swagger schema - -Swagger schemas are analyzed to determine their complexity and qualify their content. -*/ -package analysis diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go deleted file mode 100644 index 7c2ca084..00000000 --- a/vendor/github.com/go-openapi/analysis/fixer.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import "github.com/go-openapi/spec" - -// FixEmptyResponseDescriptions replaces empty ("") response -// descriptions in the input with "(empty)" to ensure that the -// resulting Swagger is stays valid. The problem appears to arise -// from reading in valid specs that have a explicit response -// description of "" (valid, response.description is required), but -// due to zero values being omitted upon re-serializing (omitempty) we -// lose them unless we stick some chars in there. -func FixEmptyResponseDescriptions(s *spec.Swagger) { - for k, v := range s.Responses { - FixEmptyDesc(&v) //#nosec - s.Responses[k] = v - } - - if s.Paths == nil { - return - } - - for _, v := range s.Paths.Paths { - if v.Get != nil { - FixEmptyDescs(v.Get.Responses) - } - if v.Put != nil { - FixEmptyDescs(v.Put.Responses) - } - if v.Post != nil { - FixEmptyDescs(v.Post.Responses) - } - if v.Delete != nil { - FixEmptyDescs(v.Delete.Responses) - } - if v.Options != nil { - FixEmptyDescs(v.Options.Responses) - } - if v.Head != nil { - FixEmptyDescs(v.Head.Responses) - } - if v.Patch != nil { - FixEmptyDescs(v.Patch.Responses) - } - } -} - -// FixEmptyDescs adds "(empty)" as the description for any Response in -// the given Responses object that doesn't already have one. -func FixEmptyDescs(rs *spec.Responses) { - FixEmptyDesc(rs.Default) - for k, v := range rs.StatusCodeResponses { - FixEmptyDesc(&v) //#nosec - rs.StatusCodeResponses[k] = v - } -} - -// FixEmptyDesc adds "(empty)" as the description to the given -// Response object if it doesn't already have one and isn't a -// ref. No-op on nil input. -func FixEmptyDesc(rs *spec.Response) { - if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { - return - } - rs.Description = "(empty)" -} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go deleted file mode 100644 index ebedcc9d..00000000 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ /dev/null @@ -1,814 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import ( - "fmt" - "log" - "path" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/normalize" - "github.com/go-openapi/analysis/internal/flatten/operations" - "github.com/go-openapi/analysis/internal/flatten/replace" - "github.com/go-openapi/analysis/internal/flatten/schutils" - "github.com/go-openapi/analysis/internal/flatten/sortref" - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const definitionsPath = "#/definitions" - -// newRef stores information about refs created during the flattening process -type newRef struct { - key string - newName string - path string - isOAIGen bool - resolved bool - schema *spec.Schema - parents []string -} - -// context stores intermediary results from flatten -type context struct { - newRefs map[string]*newRef - warnings []string - resolved map[string]string -} - -func newContext() *context { - return &context{ - newRefs: make(map[string]*newRef, 150), - warnings: make([]string, 0), - resolved: make(map[string]string, 50), - } -} - -// Flatten an analyzed spec and produce a self-contained spec bundle. -// -// There is a minimal and a full flattening mode. -// -// Minimally flattening a spec means: -// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left -// unscathed) -// - Importing external (http, file) references so they become internal to the document -// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers -// like "$ref": "#/definitions/myObject/allOfs/1") -// -// A minimally flattened spec thus guarantees the following properties: -// - all $refs point to a local definition (i.e. '#/definitions/...') -// - definitions are unique -// -// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they -// represent a complex schema or express commonality in the spec. -// Otherwise, they are simply expanded. -// Self-referencing JSON pointers cannot resolve to a type and trigger an error. -// -// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. -// -// Fully flattening a spec means: -// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. -// -// By complex, we mean every JSON object with some properties. -// Arrays, when they do not define a tuple, -// or empty objects with or without additionalProperties, are not considered complex and remain inline. -// -// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions -// have been created. -// -// Available flattening options: -// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched -// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) -// - Verbose: croaks about name conflicts detected -// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening -// -// NOTE: expansion removes all $ref save circular $ref, which remain in place -// -// TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a -// x-go-name extension -// - LiftAllOfs: -// - limit the flattening of allOf members when simple objects -// - merge allOf with validation only -// - merge allOf with extensions only -// - ... 
-func Flatten(opts FlattenOpts) error { - debugLog("FlattenOpts: %#v", opts) - - opts.flattenContext = newContext() - - // 1. Recursively expand responses, parameters, path items and items in simple schemas. - // - // This simplifies the spec and leaves only the $ref's in schema objects. - if err := expand(&opts); err != nil { - return err - } - - // 2. Strip the current document from absolute $ref's that actually a in the root, - // so we can recognize them as proper definitions - // - // In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped - if err := normalizeRef(&opts); err != nil { - return err - } - - // 3. Optionally remove shared parameters and responses already expanded (now unused). - // - // Operation parameters (i.e. under paths) remain. - if opts.RemoveUnused { - removeUnusedShared(&opts) - } - - // 4. Import all remote references. - if err := importReferences(&opts); err != nil { - return err - } - - // 5. full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) - if !opts.Minimal && !opts.Expand { - if err := nameInlinedSchemas(&opts); err != nil { - return err - } - } - - // 6. Rewrite JSON pointers other than $ref to named definitions - // and attempt to resolve conflicting names whenever possible. - if err := stripPointersAndOAIGen(&opts); err != nil { - return err - } - - // 7. Strip the spec from unused definitions - if opts.RemoveUnused { - removeUnused(&opts) - } - - // 8. Issue warning notifications, if any - opts.croak() - - // TODO: simplify known schema patterns to flat objects with properties - // examples: - // - lift simple allOf object, - // - empty allOf with validation only or extensions only - // - rework allOf arrays - // - rework allOf additionalProperties - - return nil -} - -func expand(opts *FlattenOpts) error { - if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { - return err - } - - opts.Spec.reload() // re-analyze - - return nil -} - -// normalizeRef strips the current file from any absolute file $ref. This works around issue go-openapi/spec#76: -// leading absolute file in $ref is stripped -func normalizeRef(opts *FlattenOpts) error { - debugLog("normalizeRef") - - altered := false - for k, w := range opts.Spec.references.allRefs { - if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS - continue - } - - altered = true - debugLog("stripping absolute path for: %s", w.String()) - - // strip the base path from definition - if err := replace.UpdateRef(opts.Swagger(), k, - spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil { - return err - } - } - - if altered { - opts.Spec.reload() // re-analyze - } - - return nil -} - -func removeUnusedShared(opts *FlattenOpts) { - opts.Swagger().Parameters = nil - opts.Swagger().Responses = nil - - opts.Spec.reload() // re-analyze -} - -func importReferences(opts *FlattenOpts) error { - var ( - imported bool - err error - ) - - for !imported && err == nil { - // iteratively import remote references until none left. - // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") - imported, err = importExternalReferences(opts) - - opts.Spec.reload() // re-analyze - } - - return err -} - -// nameInlinedSchemas replaces every complex inline construct by a named definition. 
-func nameInlinedSchemas(opts *FlattenOpts) error { - debugLog("nameInlinedSchemas") - - namer := &InlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: operations.AllOpRefsByRef(opts.Spec, nil), - flattenContext: opts.flattenContext, - opts: opts, - } - - depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) - for _, key := range depthFirst { - sch := opts.Spec.allSchemas[key] - if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { - continue - } - - asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return fmt.Errorf("schema analysis [%s]: %w", key, err) - } - - if asch.isAnalyzedAsComplex() { // move complex schemas to definitions - if err := namer.Name(key, sch.Schema, asch); err != nil { - return err - } - } - } - - opts.Spec.reload() // re-analyze - - return nil -} - -func removeUnused(opts *FlattenOpts) { - for removeUnusedSinglePass(opts) { - // continue until no unused definition remains - } -} - -func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) { - expected := make(map[string]struct{}) - for k := range opts.Swagger().Definitions { - expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} - } - - for _, k := range opts.Spec.AllDefinitionReferences() { - delete(expected, k) - } - - for k := range expected { - hasRemoved = true - debugLog("removing unused definition %s", path.Base(k)) - if opts.Verbose { - log.Printf("info: removing unused definition: %s", path.Base(k)) - } - delete(opts.Swagger().Definitions, path.Base(k)) - } - - opts.Spec.reload() // re-analyze - - return hasRemoved -} - -func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { - // rewrite ref with already resolved external ref (useful for cyclical refs): - // rewrite external refs to local ones - debugLog("resolving known ref [%s] to %s", refStr, newName) - - for _, key := range entry.Keys { - if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - } - - return nil -} - -func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { - var ( - isOAIGen bool - newName string - ) - - debugLog("resolving schema from remote $ref [%s]", refStr) - - sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) - if err != nil { - return fmt.Errorf("could not resolve schema: %w", err) - } - - // at this stage only $ref analysis matters - partialAnalyzer := &Spec{ - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - partialAnalyzer.reset() - partialAnalyzer.analyzeSchema("", sch, "/") - - // now rewrite those refs with rebase - for key, ref := range partialAnalyzer.references.allRefs { - if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { - return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err) - } - } - - // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name - newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts)) - debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) - - opts.flattenContext.resolved[refStr] = newName - - // rewrite the external refs to local ones - for _, key := range entry.Keys { - if err := 
replace.UpdateRef(opts.Swagger(), key, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - - // keep track of created refs - resolved := false - if _, ok := opts.flattenContext.newRefs[key]; ok { - resolved = opts.flattenContext.newRefs[key].resolved - } - - debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved) - opts.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: path.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - // add the resolved schema to the definitions - schutils.Save(opts.Swagger(), newName, sch) - - return nil -} - -// importExternalReferences iteratively digs remote references and imports them into the main schema. -// -// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. -// -// This returns true when no more remote references can be found. -func importExternalReferences(opts *FlattenOpts) (bool, error) { - debugLog("importExternalReferences") - - groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath) - sortedRefStr := make([]string, 0, len(groupedRefs)) - if opts.flattenContext == nil { - opts.flattenContext = newContext() - } - - // sort $ref resolution to ensure deterministic name conflict resolution - for refStr := range groupedRefs { - sortedRefStr = append(sortedRefStr, refStr) - } - sort.Strings(sortedRefStr) - - complete := true - - for _, refStr := range sortedRefStr { - entry := groupedRefs[refStr] - if entry.Ref.HasFragmentOnly { - continue - } - - complete = false - - newName := opts.flattenContext.resolved[refStr] - if newName != "" { - if err := importKnownRef(entry, refStr, newName, opts); err != nil { - return false, err - } - - continue - } - - // resolve schemas - if err := importNewRef(entry, refStr, opts); err != nil { - return false, err - } - } - - // maintains ref index entries - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - - // update tracking with resolved schemas - if r.schema.Ref.String() != "" { - ref := spec.MustCreateRef(r.path) - sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) - if err != nil { - return false, fmt.Errorf("could not resolve schema: %w", err) - } - - r.schema = sch - } - - if r.path == k { - continue - } - - // update tracking with renamed keys: got a cascade of refs - renamed := *r - renamed.key = r.path - opts.flattenContext.newRefs[renamed.path] = &renamed - - // indirect ref - r.newName = path.Base(k) - r.schema = spec.RefSchema(r.path) - r.path = k - r.isOAIGen = strings.Contains(k, "OAIGen") - } - - return complete, nil -} - -// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler. -// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. 
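As a small aside (an editor's sketch, not part of the vendored file): the test used throughout this file to tell a top-level definition from an anonymous JSON pointer is a path.Dir comparison, shown below with invented example refs:

package main

import (
	"fmt"
	"path"
)

const definitionsPath = "#/definitions"

// isTopLevelDefinition mirrors the check used by namePointers below.
func isTopLevelDefinition(ref string) bool {
	return path.Dir(ref) == definitionsPath
}

func main() {
	fmt.Println(isTopLevelDefinition("#/definitions/pet"))                       // true: left alone
	fmt.Println(isTopLevelDefinition("#/paths/~1pets/get/responses/200/schema")) // false: gets named
}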
-func stripPointersAndOAIGen(opts *FlattenOpts) error { - // name all JSON pointers to anonymous documents - if err := namePointers(opts); err != nil { - return err - } - - // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) - hasIntroducedPointerOrInline, ers := stripOAIGen(opts) - if ers != nil { - return ers - } - - // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers - for hasIntroducedPointerOrInline { - if !opts.Minimal { - opts.Spec.reload() // re-analyze - if err := nameInlinedSchemas(opts); err != nil { - return err - } - } - - if err := namePointers(opts); err != nil { - return err - } - - // restrip and re-analyze - var err error - if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil { - return err - } - } - - return nil -} - -// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. - // - // A dedupe is deemed unnecessary whenever: - // - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) - // - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite other parents to point to - // the first parent. - // - // This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate - // pointer and name resolution again. - func stripOAIGen(opts *FlattenOpts) (bool, error) { - debugLog("stripOAIGen") - replacedWithComplex := false - - // figure out referrers of OAIGen definitions (doing this before the refs start mutating) - for _, r := range opts.flattenContext.newRefs { - updateRefParents(opts.Spec.references.allRefs, r) - } - - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", - k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) - - if !r.isOAIGen || len(r.parents) == 0 { - continue - } - - hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r) - if err != nil { - return replacedWithComplex, err - } - - replacedWithComplex = replacedWithComplex || hasReplacedWithComplex - } - - debugLog("replacedWithComplex: %t", replacedWithComplex) - opts.Spec.reload() // re-analyze - - return replacedWithComplex, nil -} - -// updateRefParents updates all parents of an updated $ref -func updateRefParents(allRefs map[string]spec.Ref, r *newRef) { - if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) - return - } - for k, v := range allRefs { - if r.path != v.String() { - continue - } - - found := false - for _, p := range r.parents { - if p == k { - found = true - - break - } - } - if !found { - r.parents = append(r.parents, k) - } - } -} - -func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { - replacedWithComplex := false - - pr := sortref.TopmostFirst(r.parents) - - // rewrite first parent schema in hierarchical then lexicographical order - debugLog("rewrite first parent %s with schema", pr[0]) - if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { - return false, err - } - - if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { - // update parent in ref index entry - debugLog("update parent entry: %s", pr[0]) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - - // rewrite other parents to point to first parent - if len(pr) > 1 { - for
_, p := range pr[1:] { - replacingRef := spec.MustCreateRef(pr[0]) - - // set complex when replacing ref is an anonymous jsonpointer: further processing may be required - replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath - debugLog("rewrite parent with ref: %s", replacingRef.String()) - - // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). - // Those are stripped later on. - if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil { - return false, err - } - - if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen { - // update parent in ref index - debugLog("update parent entry: %s", p) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - } - } - - // remove OAIGen definition - debugLog("removing definition %s", path.Base(r.path)) - delete(opts.Swagger().Definitions, path.Base(r.path)) - - // propagate changes in ref index for keys which have this one as a parent - for kk, value := range opts.flattenContext.newRefs { - if kk == k || !value.isOAIGen || value.resolved { - continue - } - - found := false - newParents := make([]string, 0, len(value.parents)) - for _, parent := range value.parents { - switch { - case parent == r.path: - found = true - parent = pr[0] - case strings.HasPrefix(parent, r.path+"/"): - found = true - parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path)) - } - - newParents = append(newParents, parent) - } - - if found { - value.parents = newParents - } - } - - // mark naming conflict as resolved - debugLog("marking naming conflict resolved for key: %s", r.key) - opts.flattenContext.newRefs[r.key].isOAIGen = false - opts.flattenContext.newRefs[r.key].resolved = true - - // determine if the previous substitution did inline a complex schema - if r.schema != nil && r.schema.Ref.String() == "" { // inline schema - asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return false, err - } - - debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex()) - replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex() - } - - return replacedWithComplex, nil -} - -// namePointers replaces all JSON pointers to anonymous documents by a $ref to new named definitions. - // - // This is carried out depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself. - // Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used). - func namePointers(opts *FlattenOpts) error { - debugLog("name pointers") - - refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) - for k, ref := range opts.Spec.references.allRefs { - debugLog("name pointers: %q => %#v", k, ref) - if path.Dir(ref.String()) == definitionsPath { - // this is a ref to a top-level definition: ok - continue - } - - result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref) - if err != nil { - return fmt.Errorf("at %s, %w", k, err) - } - - replacingRef := result.Ref - sch := result.Schema - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...)
- } - - debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) - refsToReplace[k] = SchemaRef{ - Name: k, // caller - Ref: replacingRef, // called - Schema: sch, - TopLevel: path.Dir(replacingRef.String()) == definitionsPath, - } - } - - depthFirst := sortref.DepthFirst(refsToReplace) - namer := &InlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: operations.AllOpRefsByRef(opts.Spec, nil), - flattenContext: opts.flattenContext, - opts: opts, - } - - for _, key := range depthFirst { - v := refsToReplace[key] - // update current replacement, which may have been updated by previous changes of deeper elements - result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref) - if erd != nil { - return fmt.Errorf("at %s, %w", key, erd) - } - - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) - } - - v.Ref = result.Ref - v.Schema = result.Schema - v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath - debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) - - if v.TopLevel { - debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) - - // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref - if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil { - return err - } - - continue - } - - if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil { - return err - } - } - - opts.Spec.reload() // re-analyze - - return nil -} - -func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error { - // this is a JSON pointer to an anonymous document (internal or external): - // create a definition for this schema when: - // - it is a complex schema - // - or it is pointed by more than one $ref (i.e. expresses commonality) - // otherwise, expand the pointer (single reference to a simple type) - // - // The named definition for this follows the target's key, not the caller's - debugLog("namePointers at %s for %s", key, v.Ref.String()) - - // qualify the expanded schema - asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if ers != nil { - return fmt.Errorf("schema analysis [%s]: %w", key, ers) - } - callers := make([]string, 0, 64) - - debugLog("looking for callers") - - an := New(opts.Swagger()) - for k, w := range an.references.allRefs { - r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w) - if err != nil { - return fmt.Errorf("at %s, %w", key, err) - } - - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...) 
- } - - if r.Ref.String() == v.Ref.String() { - callers = append(callers, k) - } - } - - debugLog("callers for %s: %d", v.Ref.String(), len(callers)) - if len(callers) == 0 { - // has already been updated and resolved - return nil - } - - parts := sortref.KeyParts(v.Ref.String()) - debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) - - // identifying edge case when the namer did nothing because we point to a non-schema object - // no definition is created and we expand the $ref for all callers - debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t", - asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(), - ) - - if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { - debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) - if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { - return err - } - - // regular case: we named the $ref as a definition, and we move all callers to this new $ref - for _, caller := range callers { - if caller == key { - continue - } - - // move $ref for next to resolve - debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) - c := refsToReplace[caller] - c.Ref = v.Ref - refsToReplace[caller] = c - } - - return nil - } - - // everything that is a simple schema and not factorizable is expanded - debugLog("expand JSON pointer for key=%s", key) - - if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { - return err - } - // NOTE: there is no other caller to update - - return nil -} diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go deleted file mode 100644 index c7d7938e..00000000 --- a/vendor/github.com/go-openapi/analysis/flatten_name.go +++ /dev/null @@ -1,308 +0,0 @@ -package analysis - -import ( - "fmt" - "path" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/operations" - "github.com/go-openapi/analysis/internal/flatten/replace" - "github.com/go-openapi/analysis/internal/flatten/schutils" - "github.com/go-openapi/analysis/internal/flatten/sortref" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// InlineSchemaNamer finds a new name for an inlined type -type InlineSchemaNamer struct { - Spec *spec.Swagger - Operations map[string]operations.OpRef - flattenContext *context - opts *FlattenOpts -} - -// Name yields a new name for the inline schema -func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { - debugLog("naming inlined schema at %s", key) - - parts := sortref.KeyParts(key) - for _, name := range namesFromKey(parts, aschema, isn.Operations) { - if name == "" { - continue - } - - // create unique name - mangle := mangler(isn.opts) - newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name)) - - // clone schema - sch := schutils.Clone(schema) - - // replace values on schema - debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName) - if err := replace.RewriteSchemaToRef(isn.Spec, key, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err) - } - - // rewrite any dependent $ref pointing to this place, - // when not already pointing to a top-level definition. 
- // - // NOTE: this is important if such referers use arbitrary JSON pointers. - an := New(isn.Spec) - for k, v := range an.references.allRefs { - r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v) - if erd != nil { - return fmt.Errorf("at %s, %w", k, erd) - } - - if isn.opts.flattenContext != nil { - isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...) - } - - if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) { - continue - } - - debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String()) - - // rewrite $ref to the new target - if err := replace.UpdateRef(isn.Spec, k, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - } - - // NOTE: this extension is currently not used by go-swagger (provided for information only) - sch.AddExtension("x-go-gen-location", GenLocation(parts)) - - // save cloned schema to definitions - schutils.Save(isn.Spec, newName, sch) - - // keep track of created refs - if isn.flattenContext == nil { - continue - } - - debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen) - resolved := false - - if _, ok := isn.flattenContext.newRefs[key]; ok { - resolved = isn.flattenContext.newRefs[key].resolved - } - - isn.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: path.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - return nil -} - -// uniqifyName yields a unique name for a definition -func uniqifyName(definitions spec.Definitions, name string) (string, bool) { - isOAIGen := false - if name == "" { - name = "oaiGen" - isOAIGen = true - } - - if len(definitions) == 0 { - return name, isOAIGen - } - - unq := true - for k := range definitions { - if strings.EqualFold(k, name) { - unq = false - - break - } - } - - if unq { - return name, isOAIGen - } - - name += "OAIGen" - isOAIGen = true - var idx int - unique := name - _, known := definitions[unique] - - for known { - idx++ - unique = fmt.Sprintf("%s%d", name, idx) - _, known = definitions[unique] - } - - return unique, isOAIGen -} - -func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string { - var ( - baseNames [][]string - startIndex int - ) - - switch { - case parts.IsOperation(): - baseNames, startIndex = namesForOperation(parts, operations) - case parts.IsDefinition(): - baseNames, startIndex = namesForDefinition(parts) - default: - // this a non-standard pointer: build a name by concatenating its parts - baseNames = [][]string{parts} - startIndex = len(baseNames) + 1 - } - - result := make([]string, 0, len(baseNames)) - for _, segments := range baseNames { - nm := parts.BuildName(segments, startIndex, partAdder(aschema)) - if nm == "" { - continue - } - - result = append(result, nm) - } - sort.Strings(result) - - debugLog("names from parts: %v => %v", parts, result) - return result -} - -func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { - var ( - baseNames [][]string - startIndex int - ) - - piref := parts.PathItemRef() - if piref.String() != "" && parts.IsOperationParam() { - if op, ok := operations[piref.String()]; ok { - startIndex = 5 - baseNames = append(baseNames, []string{op.ID, "params", "body"}) - } - } else if parts.IsSharedOperationParam() { - pref := parts.PathRef() - for k, v := range 
operations { - if strings.HasPrefix(k, pref.String()) { - startIndex = 4 - baseNames = append(baseNames, []string{v.ID, "params", "body"}) - } - } - } - - return baseNames, startIndex -} - -func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { - var ( - baseNames [][]string - startIndex int - ) - - // params - if parts.IsOperationParam() || parts.IsSharedOperationParam() { - baseNames, startIndex = namesForParam(parts, operations) - } - - // responses - if parts.IsOperationResponse() { - piref := parts.PathItemRef() - if piref.String() != "" { - if op, ok := operations[piref.String()]; ok { - startIndex = 6 - baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) - } - } - } - - return baseNames, startIndex -} - -func namesForDefinition(parts sortref.SplitKey) ([][]string, int) { - nm := parts.DefinitionName() - if nm != "" { - return [][]string{{parts.DefinitionName()}}, 2 - } - - return [][]string{}, 0 -} - -// partAdder knows how to interpret a schema when it comes to build a name from parts -func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { - return func(part string) []string { - segments := make([]string, 0, 2) - - if part == "items" || part == "additionalItems" { - if aschema.IsTuple || aschema.IsTupleWithExtra { - segments = append(segments, "tuple") - } else { - segments = append(segments, "items") - } - - if part == "additionalItems" { - segments = append(segments, part) - } - - return segments - } - - segments = append(segments, part) - - return segments - } -} - -func mangler(o *FlattenOpts) func(string) string { - if o.KeepNames { - return func(in string) string { return in } - } - - return swag.ToJSONName -} - -func nameFromRef(ref spec.Ref, o *FlattenOpts) string { - mangle := mangler(o) - - u := ref.GetURL() - if u.Fragment != "" { - return mangle(path.Base(u.Fragment)) - } - - if u.Path != "" { - bn := path.Base(u.Path) - if bn != "" && bn != "/" { - ext := path.Ext(bn) - if ext != "" { - return mangle(bn[:len(bn)-len(ext)]) - } - - return mangle(bn) - } - } - - return mangle(strings.ReplaceAll(u.Host, ".", " ")) -} - -// GenLocation indicates from which section of the specification (models or operations) a definition has been created. -// -// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided -// for information only. -func GenLocation(parts sortref.SplitKey) string { - switch { - case parts.IsOperation(): - return "operations" - case parts.IsDefinition(): - return "models" - default: - return "" - } -} diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go deleted file mode 100644 index c943fe1e..00000000 --- a/vendor/github.com/go-openapi/analysis/flatten_options.go +++ /dev/null @@ -1,79 +0,0 @@ -package analysis - -import ( - "log" - - "github.com/go-openapi/spec" -) - -// FlattenOpts configuration for flattening a swagger specification. -// -// The BasePath parameter is used to locate remote relative $ref found in the specification. -// This path is a file: it points to the location of the root document and may be either a local -// file path or a URL. -// -// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...") -// found in the spec are searched from the current working directory. 
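A hedged sketch (editor's illustration, not from the vendored file) of assembling these options; the analyzed spec and base path are assumed to come from the caller, e.g. from a loaded root document:

package main

import "github.com/go-openapi/analysis"

// buildOpts illustrates the two documented modes: full flattening
// (Minimal and Expand both false) versus plain expansion (Expand true).
// an and basePath are assumptions supplied by the caller.
func buildOpts(an *analysis.Spec, basePath string, expandOnly bool) analysis.FlattenOpts {
	return analysis.FlattenOpts{
		Spec:     an,       // the analyzed spec to work with
		BasePath: basePath, // location of the root document, used to resolve relative $ref
		Expand:   expandOnly,
	}
}

func main() {
	_ = buildOpts // sketch only
}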
-type FlattenOpts struct { - Spec *Spec // The analyzed spec to work with - flattenContext *context // Internal context to track flattening activity - - BasePath string // The location of the root document for this spec to resolve relative $ref - - // Flattening options - Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false) - Minimal bool // When true, do not decompose complex structures such as allOf - Verbose bool // enable some reporting on possible name conflicts detected - RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening - ContinueOnError bool // Continue when spec expansion issues are found - KeepNames bool // Do not attempt to jsonify names from references when flattening - - /* Extra keys */ - _ struct{} // require keys -} - -// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. -func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { - return &spec.ExpandOptions{ - RelativeBase: f.BasePath, - SkipSchemas: skipSchemas, - ContinueOnError: f.ContinueOnError, - } -} - -// Swagger gets the swagger specification for this flatten operation -func (f *FlattenOpts) Swagger() *spec.Swagger { - return f.Spec.spec -} - -// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting -// from flattening a spec -func (f *FlattenOpts) croak() { - if !f.Verbose { - return - } - - reported := make(map[string]bool, len(f.flattenContext.newRefs)) - for _, v := range f.Spec.references.allRefs { - // warns about duplicate handling - for _, r := range f.flattenContext.newRefs { - if r.isOAIGen && r.path == v.String() { - reported[r.newName] = true - } - } - } - - for k := range reported { - log.Printf("warning: duplicate flattened definition name resolved as %s", k) - } - - // warns about possible type mismatches - uniqueMsg := make(map[string]bool) - for _, msg := range f.flattenContext.warnings { - if _, ok := uniqueMsg[msg]; ok { - continue - } - log.Printf("warning: %s", msg) - uniqueMsg[msg] = true - } -} diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go deleted file mode 100644 index 39f55a97..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package debug - -import ( - "fmt" - "log" - "os" - "path/filepath" - "runtime" -) - -var ( - output = os.Stdout -) - -// GetLogger provides a prefix debug logger -func GetLogger(prefix string, debug bool) func(string, ...interface{}) { - if debug { - logger := log.New(output, prefix+":", log.LstdFlags) - - return func(msg string, args ...interface{}) { - _, file1, pos1, _ := runtime.Caller(1) - logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } - } - - return func(_ string, _ ...interface{}) {} -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go deleted file mode 100644 index 8c9df058..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go +++ /dev/null @@ -1,87 +0,0 @@ -package normalize - -import ( - "net/url" - "path" - "path/filepath" - "strings" - - "github.com/go-openapi/spec" -) - -// RebaseRef rebases a remote ref relative to a base ref. -// -// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). -// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func RebaseRef(baseRef string, ref string) string { - baseRef, _ = url.PathUnescape(baseRef) - ref, _ = url.PathUnescape(ref) - - if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { - return ref - } - - parts := strings.Split(ref, "#") - - baseParts := strings.Split(baseRef, "#") - baseURL, _ := url.Parse(baseParts[0]) - if strings.HasPrefix(ref, "#") { - if baseURL.Host == "" { - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - - refURL, _ := url.Parse(parts[0]) - if refURL.Host != "" || filepath.IsAbs(parts[0]) { - // not rebasing an absolute path - return ref - } - - // there is a relative path - var basePath string - if baseURL.Host != "" { - // when there is a host, standard URI rules apply (with "/") - baseURL.Path = path.Dir(baseURL.Path) - baseURL.Path = path.Join(baseURL.Path, "/"+parts[0]) - - return baseURL.String() - } - - // this is a local relative path - // basePart[0] and parts[0] are local filesystem directories/files - basePath = filepath.Dir(baseParts[0]) - relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) - if len(parts) > 1 { - return strings.Join([]string{relPath, parts[1]}, "#") - } - - return relPath -} - -// Path renders absolute path on remote file refs -// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func Path(ref spec.Ref, basePath string) string { - uri, _ := url.PathUnescape(ref.String()) - if ref.HasFragmentOnly || filepath.IsAbs(uri) { - return uri - } - - refURL, _ := url.Parse(uri) - if refURL.Host != "" { - return uri - } - - parts := strings.Split(uri, "#") - // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage - parts[0] = filepath.Join(filepath.Dir(basePath), parts[0]) - - return strings.Join(parts, "#") -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go deleted file mode 100644 index 7f3a2b87..00000000 --- 
a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go +++ /dev/null @@ -1,90 +0,0 @@ -package operations - -import ( - "path" - "sort" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// AllOpRefsByRef returns an index of sortable operations -func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef { - return OpRefsByRef(GatherOperations(specDoc, operationIDs)) -} - -// OpRefsByRef indexes a map of sortable operations -func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { - result := make(map[string]OpRef, len(oprefs)) - for _, v := range oprefs { - result[v.Ref.String()] = v - } - - return result -} - -// OpRef is an indexable, sortable operation -type OpRef struct { - Method string - Path string - Key string - ID string - Op *spec.Operation - Ref spec.Ref -} - -// OpRefs is a sortable collection of operations -type OpRefs []OpRef - -func (o OpRefs) Len() int { return len(o) } -func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } - -// Provider knows how to collect operations from a spec -type Provider interface { - Operations() map[string]map[string]*spec.Operation -} - -// GatherOperations builds a map of sorted operations from a spec -func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { - var oprefs OpRefs - - for method, pathItem := range specDoc.Operations() { - for pth, operation := range pathItem { - vv := *operation - oprefs = append(oprefs, OpRef{ - Key: swag.ToGoName(strings.ToLower(method) + " " + pth), - Method: method, - Path: pth, - ID: vv.ID, - Op: &vv, - Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), - }) - } - } - - sort.Sort(oprefs) - - operations := make(map[string]OpRef) - for _, opr := range oprefs { - nm := opr.ID - if nm == "" { - nm = opr.Key - } - - oo, found := operations[nm] - if found && oo.Method != opr.Method && oo.Path != opr.Path { - nm = opr.Key - } - - if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) { - opr.ID = nm - opr.Op.ID = nm - operations[nm] = opr - } - } - - return operations -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go deleted file mode 100644 index c0f43e72..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go +++ /dev/null @@ -1,458 +0,0 @@ -package replace - -import ( - "encoding/json" - "fmt" - "net/url" - "os" - "path" - "strconv" - - "github.com/go-openapi/analysis/internal/debug" - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const definitionsPath = "#/definitions" - -var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "") - -// RewriteSchemaToRef replaces a schema with a Ref -func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { - debugLog("rewriting schema to ref for %s with %s", key, ref.String()) - _, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - return rewriteParentRef(sp, key, ref) - - case spec.Schema: - return rewriteParentRef(sp, key, ref) - - case *spec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - } - 
- case *spec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - } - case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{} - return rewriteParentRef(sp, key, ref) - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { - parent, entry, pvalue, err := getParentFromKey(sp, key) - if err != nil { - return err - } - - debugLog("rewriting holder for %T", pvalue) - switch container := pvalue.(type) { - case spec.Response: - if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { - return err - } - - case *spec.Response: - container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.Responses: - statusCode, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - resp := container.StatusCodeResponses[statusCode] - resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container.StatusCodeResponses[statusCode] = resp - - case map[string]spec.Response: - resp := container[entry] - resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[entry] = resp - - case spec.Parameter: - if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { - return err - } - - case map[string]spec.Parameter: - param := container[entry] - param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[entry] = param - - case []spec.Parameter: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - param := container[idx] - param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[idx] = param - - case spec.Definitions: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case map[string]spec.Schema: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case spec.SchemaProperties: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *interface{}: - *container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) - } - - return nil -} - -// getPointerFromKey retrieves the content of the JSON pointer "key" -func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - if key == "#/" { - return "", sp, nil - } - // unescape chars in key, e.g. 
"{}" from path params - pth, _ := url.PathUnescape(key[1:]) - ptr, err := jsonpointer.New(pth) - if err != nil { - return "", nil, err - } - - value, _, err := ptr.Get(sp) - if err != nil { - debugLog("error when getting key: %s with path: %s", key, pth) - - return "", nil, err - } - - return pth, value, nil -} - -// getParentFromKey retrieves the container of the JSON pointer "key" -func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - // unescape chars in key, e.g. "{}" from path params - pth, _ := url.PathUnescape(key[1:]) - - parent, entry := path.Dir(pth), path.Base(pth) - debugLog("getting schema holder at: %s, with entry: %s", parent, entry) - - pptr, err := jsonpointer.New(parent) - if err != nil { - return "", "", nil, err - } - pvalue, _, err := pptr.Get(sp) - if err != nil { - return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err) - } - - return parent, entry, pvalue, nil -} - -// UpdateRef replaces a ref by another one -func UpdateRef(sp interface{}, key string, ref spec.Ref) error { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - debugLog("updating ref for %s with %s", key, ref.String()) - pth, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - refable.Ref = ref - case *spec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case *spec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case spec.Schema: - debugLog("rewriting holder for %T", refable) - _, entry, pvalue, erp := getParentFromKey(sp, key) - if erp != nil { - return err - } - switch container := pvalue.(type) { - case spec.Definitions: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case map[string]spec.Schema: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case spec.SchemaProperties: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled container type at %s: %T", key, value) - } - - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -// UpdateRefWithSchema replaces a ref with a schema (i.e. 
re-inline schema) -func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { - debugLog("updating ref for %s with schema", key) - pth, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - *refable = *sch - case spec.Schema: - _, entry, pvalue, erp := getParentFromKey(sp, key) - if erp != nil { - return erp - } - switch container := pvalue.(type) { - case spec.Definitions: - container[entry] = *sch - - case map[string]spec.Schema: - container[entry] = *sch - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container[idx] = *sch - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container.Schemas[idx] = *sch - - case spec.SchemaProperties: - container[entry] = *sch - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value) - } - case *spec.SchemaOrArray: - *refable.Schema = *sch - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - case *spec.SchemaOrBool: - *refable.Schema = *sch - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -// DeepestRefResult holds the results from DeepestRef analysis -type DeepestRefResult struct { - Ref spec.Ref - Schema *spec.Schema - Warnings []string -} - -// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. -// - if no definition is found, returns the deepest ref. -// - pointers to external files are expanded -// -// NOTE: all external $ref's are assumed to be already expanded at this stage. -func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { - if !ref.HasFragmentOnly { - // we found an external $ref, which is odd at this stage: - // do nothing on external $refs - return &DeepestRefResult{Ref: ref}, nil - } - - currentRef := ref - visited := make(map[string]bool, 64) - warnings := make([]string, 0, 2) - -DOWNREF: - for currentRef.String() != "" { - if path.Dir(currentRef.String()) == definitionsPath { - // this is a top-level definition: stop here and return this ref - return &DeepestRefResult{Ref: currentRef}, nil - } - - if _, beenThere := visited[currentRef.String()]; beenThere { - return nil, - fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String()) - } - - visited[currentRef.String()] = true - value, _, err := currentRef.GetPointer().Get(sp) - if err != nil { - return nil, err - } - - switch refable := value.(type) { - case *spec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case spec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case *spec.SchemaOrArray: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case *spec.SchemaOrBool: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case spec.Response: - // a pointer points to a schema initially marshalled in responses section...
- // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema spec.Schema - - err := asSchema.UnmarshalJSON(asJSON) - if err != nil { - return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)", - currentRef.String(), value, err, - ) - } - warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - case spec.Parameter: - // a pointer points to a schema initially marshalled in parameters section... - // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema spec.Schema - if err := asSchema.UnmarshalJSON(asJSON); err != nil { - return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T (%v)", - currentRef.String(), value, err, - ) - } - - warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - default: - // fallback: attempts to resolve the pointer as a schema - if refable == nil { - break DOWNREF - } - - asJSON, _ := json.Marshal(refable) - var asSchema spec.Schema - if err := asSchema.UnmarshalJSON(asJSON); err != nil { - return nil, - fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)", - currentRef.String(), value, err, - ) - } - warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable)) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - } - } - - // assess what schema we're ending with - sch, erv := spec.ResolveRefWithBase(sp, &currentRef, opts) - if erv != nil { - return nil, erv - } - - if sch == nil { - return nil, fmt.Errorf("no schema found at %s", currentRef.String()) - } - - return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go deleted file mode 100644 index 4590236e..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package schutils provides tools to save or clone a schema -// when flattening a spec.
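Before the package source, a hedged sketch of what Save amounts to (schutils is an internal package, so its effect is reproduced inline here as an editor's illustration); the definition name and schema are invented:

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	sp := &spec.Swagger{}
	sch := spec.StringProperty() // hypothetical schema to register

	// what schutils.Save does: lazily allocate Definitions, then store a copy
	if sp.Definitions == nil {
		sp.Definitions = make(map[string]spec.Schema)
	}
	sp.Definitions["example"] = *sch

	fmt.Println(len(sp.Definitions)) // prints: 1
}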
-package schutils - -import ( - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// Save registers a schema as an entry in spec #/definitions -func Save(sp *spec.Swagger, name string, schema *spec.Schema) { - if schema == nil { - return - } - - if sp.Definitions == nil { - sp.Definitions = make(map[string]spec.Schema, 150) - } - - sp.Definitions[name] = *schema -} - -// Clone deep-clones a schema -func Clone(schema *spec.Schema) *spec.Schema { - var sch spec.Schema - _ = swag.FromDynamicJSON(schema, &sch) - - return &sch -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go deleted file mode 100644 index ac80fc2e..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go +++ /dev/null @@ -1,201 +0,0 @@ -package sortref - -import ( - "net/http" - "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const ( - paths = "paths" - responses = "responses" - parameters = "parameters" - definitions = "definitions" -) - -var ( - ignoredKeys map[string]struct{} - validMethods map[string]struct{} -) - -func init() { - ignoredKeys = map[string]struct{}{ - "schema": {}, - "properties": {}, - "not": {}, - "anyOf": {}, - "oneOf": {}, - } - - validMethods = map[string]struct{}{ - "GET": {}, - "HEAD": {}, - "OPTIONS": {}, - "PATCH": {}, - "POST": {}, - "PUT": {}, - "DELETE": {}, - } -} - -// Key represents a key item constructed from /-separated segments -type Key struct { - Segments int - Key string -} - -// Keys is a sortable collection of Keys -type Keys []Key - -func (k Keys) Len() int { return len(k) } -func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k Keys) Less(i, j int) bool { - return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) -} - -// KeyParts constructs a SplitKey with all its /-separated segments decomposed. It is sortable. -func KeyParts(key string) SplitKey { - var res []string - for _, part := range strings.Split(key[1:], "/") { - if part != "" { - res = append(res, jsonpointer.Unescape(part)) - } - } - - return res -} - -// SplitKey holds the parts of a /-separated key, so that their location may be determined. -type SplitKey []string - -// IsDefinition is true when the split key is in the #/definitions section of a spec -func (s SplitKey) IsDefinition() bool { - return len(s) > 1 && s[0] == definitions -} - -// DefinitionName yields the name of the definition -func (s SplitKey) DefinitionName() string { - if !s.IsDefinition() { - return "" - } - - return s[1] -} - -func (s SplitKey) isKeyName(i int) bool { - if i <= 0 { - return false - } - - count := 0 - for idx := i - 1; idx > 0; idx-- { - if s[idx] != "properties" { - break - } - count++ - } - - return count%2 != 0 -} - -// PartAdder knows how to construct the components of a new name -type PartAdder func(string) []string - -// BuildName builds a name from segments -func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string { - for i, part := range s[startIndex:] { - if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { - segments = append(segments, adder(part)...)
- } - } - - return strings.Join(segments, " ") -} - -// IsOperation is true when the split key is in the operations section -func (s SplitKey) IsOperation() bool { - return len(s) > 1 && s[0] == paths -} - -// IsSharedOperationParam is true when the split key is in the parameters section of a path -func (s SplitKey) IsSharedOperationParam() bool { - return len(s) > 2 && s[0] == paths && s[2] == parameters -} - -// IsSharedParam is true when the split key is in the #/parameters section of a spec -func (s SplitKey) IsSharedParam() bool { - return len(s) > 1 && s[0] == parameters -} - -// IsOperationParam is true when the split key is in the parameters section of an operation -func (s SplitKey) IsOperationParam() bool { - return len(s) > 3 && s[0] == paths && s[3] == parameters -} - -// IsOperationResponse is true when the split key is in the responses section of an operation -func (s SplitKey) IsOperationResponse() bool { - return len(s) > 3 && s[0] == paths && s[3] == responses -} - -// IsSharedResponse is true when the split key is in the #/responses section of a spec -func (s SplitKey) IsSharedResponse() bool { - return len(s) > 1 && s[0] == responses -} - -// IsDefaultResponse is true when the split key is the default response for an operation -func (s SplitKey) IsDefaultResponse() bool { - return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" -} - -// IsStatusCodeResponse is true when the split key is an operation response with a status code -func (s SplitKey) IsStatusCodeResponse() bool { - isInt := func() bool { - _, err := strconv.Atoi(s[4]) - - return err == nil - } - - return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() -} - -// ResponseName yields either the status code or "Default" for a response -func (s SplitKey) ResponseName() string { - if s.IsStatusCodeResponse() { - code, _ := strconv.Atoi(s[4]) - - return http.StatusText(code) - } - - if s.IsDefaultResponse() { - return "Default" - } - - return "" -} - -// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} -func (s SplitKey) PathItemRef() spec.Ref { - if len(s) < 3 { - return spec.Ref{} - } - - pth, method := s[1], s[2] - if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { - return spec.Ref{} - } - - return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) -} - -// PathRef constructs a $ref object from a split key of the form /paths/{reference} -func (s SplitKey) PathRef() spec.Ref { - if !s.IsOperation() { - return spec.Ref{} - } - - return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1]))) -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go deleted file mode 100644 index 73243df8..00000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go +++ /dev/null @@ -1,141 +0,0 @@ -package sortref - -import ( - "reflect" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/normalize" - "github.com/go-openapi/spec" -) - -var depthGroupOrder = []string{ - "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", -} - -type mapIterator struct { - len int - mapIter *reflect.MapIter -} - -func (i *mapIterator) Next() bool { - return i.mapIter.Next() -} - -func (i *mapIterator) Len() int { - return i.len -} - -func (i 
*mapIterator) Key() string { - return i.mapIter.Key().String() -} - -func mustMapIterator(anyMap interface{}) *mapIterator { - val := reflect.ValueOf(anyMap) - - return &mapIterator{mapIter: val.MapRange(), len: val.Len()} -} - -// DepthFirst sorts a map of anything. It groups keys by category - // (shared params, op param, status code response, default response, definitions), - // sorts groups internally by number of parts in the key and lexical names, - // then flattens the groups into a single list of keys -func DepthFirst(in interface{}) []string { - iterator := mustMapIterator(in) - sorted := make([]string, 0, iterator.Len()) - grouped := make(map[string]Keys, iterator.Len()) - - for iterator.Next() { - k := iterator.Key() - split := KeyParts(k) - var pk string - - if split.IsSharedOperationParam() { - pk = "sharedOpParam" - } - if split.IsOperationParam() { - pk = "opParam" - } - if split.IsStatusCodeResponse() { - pk = "codeResponse" - } - if split.IsDefaultResponse() { - pk = "defaultResponse" - } - if split.IsDefinition() { - pk = "definition" - } - if split.IsSharedParam() { - pk = "sharedParam" - } - if split.IsSharedResponse() { - pk = "sharedResponse" - } - grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) - } - - for _, pk := range depthGroupOrder { - res := grouped[pk] - sort.Sort(res) - - for _, v := range res { - sorted = append(sorted, v.Key) - } - } - - return sorted -} - -// topmostRefs is able to sort refs by hierarchical then lexicographic order, - // yielding refs ordered breadth-first. -type topmostRefs []string - -func (k topmostRefs) Len() int { return len(k) } -func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k topmostRefs) Less(i, j int) bool { - li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/")) - if li == lj { - return k[i] < k[j] - } - - return li < lj -} - -// TopmostFirst sorts references by depth -func TopmostFirst(refs []string) []string { - res := topmostRefs(refs) - sort.Sort(res) - - return res -} - -// RefRevIdx is a reverse index for references -type RefRevIdx struct { - Ref spec.Ref - Keys []string -} - -// ReverseIndex builds a reverse index for references in schemas -func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { - collected := make(map[string]RefRevIdx) - for key, schRef := range schemas { - // normalize paths before sorting, - // so we get together keys that are from the same external file - normalizedPath := normalize.Path(schRef, basePath) - - entry, ok := collected[normalizedPath] - if ok { - entry.Keys = append(entry.Keys, key) - collected[normalizedPath] = entry - - continue - } - - collected[normalizedPath] = RefRevIdx{ - Ref: schRef, - Keys: []string{key}, - } - } - - return collected -} diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go deleted file mode 100644 index 7785a29b..00000000 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "reflect" - - "github.com/go-openapi/spec" -) - -// Mixin modifies the primary swagger spec by adding the paths and -// definitions from the mixin specs. Top level parameters and -// responses from the mixins are also carried over. Operation id -// collisions are avoided by appending "Mixin" but only if -// needed. -// -// The following parts of primary are subject to merge, filling empty details -// - Info -// - BasePath -// - Host -// - ExternalDocs -// -// Consider calling FixEmptyResponseDescriptions() on the modified primary -// if you read them from storage and they are valid to start with. -// -// Entries in "paths", "definitions", "parameters" and "responses" are -// added to the primary in the order of the given mixins. If the entry -// already exists in primary it is skipped with a warning message. -// -// The count of skipped entries (from collisions) is returned so any -// deviation from the number expected can flag a warning in your build -// scripts. Carefully review the collisions before accepting them; -// consider renaming things if possible. -// -// No key normalization takes place (paths, type defs, -// etc). Ensure they are canonical if your downstream tools do -// key normalization of any form. -// -// Merging schemes (http, https), and consumers/producers do not account for -// collisions. -func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { - skipped := make([]string, 0, len(mixins)) - opIDs := getOpIDs(primary) - initPrimary(primary) - - for i, m := range mixins { - skipped = append(skipped, mergeSwaggerProps(primary, m)...) - - skipped = append(skipped, mergeConsumes(primary, m)...) - - skipped = append(skipped, mergeProduces(primary, m)...) - - skipped = append(skipped, mergeTags(primary, m)...) - - skipped = append(skipped, mergeSchemes(primary, m)...) - - skipped = append(skipped, mergeSecurityDefinitions(primary, m)...) - - skipped = append(skipped, mergeSecurityRequirements(primary, m)...) - - skipped = append(skipped, mergeDefinitions(primary, m)...) - - // merging paths requires a map of operationIDs to work with - skipped = append(skipped, mergePaths(primary, m, opIDs, i)...) - - skipped = append(skipped, mergeParameters(primary, m)...) - - skipped = append(skipped, mergeResponses(primary, m)...) - } - - return skipped -} - -// getOpIDs extracts all the paths..operationIds from the given -// spec and returns them as the keys in a map with 'true' values. 
-func getOpIDs(s *spec.Swagger) map[string]bool {
-	rv := make(map[string]bool)
-	if s.Paths == nil {
-		return rv
-	}
-
-	for _, v := range s.Paths.Paths {
-		piops := pathItemOps(v)
-
-		for _, op := range piops {
-			rv[op.ID] = true
-		}
-	}
-
-	return rv
-}
-
-func pathItemOps(p spec.PathItem) []*spec.Operation {
-	var rv []*spec.Operation
-	rv = appendOp(rv, p.Get)
-	rv = appendOp(rv, p.Put)
-	rv = appendOp(rv, p.Post)
-	rv = appendOp(rv, p.Delete)
-	rv = appendOp(rv, p.Head)
-	rv = appendOp(rv, p.Patch)
-
-	return rv
-}
-
-func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
-	if op == nil {
-		return ops
-	}
-
-	return append(ops, op)
-}
-
-func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for k, v := range m.SecurityDefinitions {
-		if _, exists := primary.SecurityDefinitions[k]; exists {
-			warn := fmt.Sprintf(
-				"SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
-			skipped = append(skipped, warn)
-
-			continue
-		}
-
-		primary.SecurityDefinitions[k] = v
-	}
-
-	return
-}
-
-func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for _, v := range m.Security {
-		found := false
-		for _, vv := range primary.Security {
-			if reflect.DeepEqual(v, vv) {
-				found = true
-
-				break
-			}
-		}
-
-		if found {
-			warn := fmt.Sprintf(
-				"Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v)
-			skipped = append(skipped, warn)
-
-			continue
-		}
-		primary.Security = append(primary.Security, v)
-	}
-
-	return
-}
-
-func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for k, v := range m.Definitions {
-		// assume name collisions represent IDENTICAL type. careful.
-		if _, exists := primary.Definitions[k]; exists {
-			warn := fmt.Sprintf(
-				"definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
-			skipped = append(skipped, warn)
-
-			continue
-		}
-		primary.Definitions[k] = v
-	}
-
-	return
-}
-
-func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) {
-	if m.Paths != nil {
-		for k, v := range m.Paths.Paths {
-			if _, exists := primary.Paths.Paths[k]; exists {
-				warn := fmt.Sprintf(
-					"paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
-				skipped = append(skipped, warn)
-
-				continue
-			}
-
-			// Swagger requires that operationIds be
-			// unique within a spec. If we find a
-			// collision we append "Mixin0" to the
-			// operationId we are adding, where 0 is mixin
-			// index. We assume that operationIds within
-			// all the provided specs are already unique.
-			piops := pathItemOps(v)
-			for _, piop := range piops {
-				if opIDs[piop.ID] {
-					piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
-				}
-				opIDs[piop.ID] = true
-			}
-			primary.Paths.Paths[k] = v
-		}
-	}
-
-	return
-}
-
-func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for k, v := range m.Parameters {
-		// could try to rename on conflict but would
-		// have to fix $refs in the mixin.
Complain - // for now - if _, exists := primary.Parameters[k]; exists { - warn := fmt.Sprintf( - "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Parameters[k] = v - } - - return -} - -func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Responses { - // could try to rename on conflict but would - // have to fix $refs in the mixin. Complain - // for now - if _, exists := primary.Responses[k]; exists { - warn := fmt.Sprintf( - "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Responses[k] = v - } - - return skipped -} - -func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Consumes { - found := false - for _, vv := range primary.Consumes { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Consumes = append(primary.Consumes, v) - } - - return []string{} -} - -func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Produces { - found := false - for _, vv := range primary.Produces { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Produces = append(primary.Produces, v) - } - - return []string{} -} - -func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Tags { - found := false - for _, vv := range primary.Tags { - if v.Name == vv.Name { - found = true - - break - } - } - - if found { - warn := fmt.Sprintf( - "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", - v.Name, - ) - skipped = append(skipped, warn) - - continue - } - - primary.Tags = append(primary.Tags, v) - } - - return -} - -func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Schemes { - found := false - for _, vv := range primary.Schemes { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Schemes = append(primary.Schemes, v) - } - - return []string{} -} - -func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { - var skipped, skippedInfo, skippedDocs []string - - primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) - - // merging details in swagger top properties - if primary.Host == "" { - primary.Host = m.Host - } - - if primary.BasePath == "" { - primary.BasePath = m.BasePath - } - - if primary.Info == nil { - primary.Info = m.Info - } else if m.Info != nil { - skippedInfo = mergeInfo(primary.Info, m.Info) - skipped = append(skipped, skippedInfo...) - } - - if primary.ExternalDocs == nil { - primary.ExternalDocs = m.ExternalDocs - } else if m != nil { - skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs) - skipped = append(skipped, skippedDocs...) 
-	}
-
-	return skipped
-}
-
-//nolint:unparam
-func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string {
-	if primary.Description == "" {
-		primary.Description = m.Description
-	}
-
-	if primary.URL == "" {
-		primary.URL = m.URL
-	}
-
-	return nil
-}
-
-func mergeInfo(primary *spec.Info, m *spec.Info) []string {
-	var sk, skipped []string
-
-	primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions)
-	skipped = append(skipped, sk...)
-
-	if primary.Description == "" {
-		primary.Description = m.Description
-	}
-
-	if primary.Title == "" {
-		primary.Title = m.Title
-	}
-
-	if primary.TermsOfService == "" {
-		primary.TermsOfService = m.TermsOfService
-	}
-
-	if primary.Version == "" {
-		primary.Version = m.Version
-	}
-
-	if primary.Contact == nil {
-		primary.Contact = m.Contact
-	} else if m.Contact != nil {
-		var csk []string
-		primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions)
-		skipped = append(skipped, csk...)
-
-		if primary.Contact.Name == "" {
-			primary.Contact.Name = m.Contact.Name
-		}
-
-		if primary.Contact.URL == "" {
-			primary.Contact.URL = m.Contact.URL
-		}
-
-		if primary.Contact.Email == "" {
-			primary.Contact.Email = m.Contact.Email
-		}
-	}
-
-	if primary.License == nil {
-		primary.License = m.License
-	} else if m.License != nil {
-		var lsk []string
-		primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions)
-		skipped = append(skipped, lsk...)
-
-		if primary.License.Name == "" {
-			primary.License.Name = m.License.Name
-		}
-
-		if primary.License.URL == "" {
-			primary.License.URL = m.License.URL
-		}
-	}
-
-	return skipped
-}
-
-func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) {
-	if primary == nil {
-		result = m
-
-		return
-	}
-
-	if m == nil {
-		result = primary
-
-		return
-	}
-
-	result = primary
-	for k, v := range m {
-		if _, found := primary[k]; found {
-			skipped = append(skipped, k)
-
-			continue
-		}
-
-		primary[k] = v
-	}
-
-	return
-}
-
-func initPrimary(primary *spec.Swagger) {
-	if primary.SecurityDefinitions == nil {
-		primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme)
-	}
-
-	if primary.Security == nil {
-		primary.Security = make([]map[string][]string, 0, 10)
-	}
-
-	if primary.Produces == nil {
-		primary.Produces = make([]string, 0, 10)
-	}
-
-	if primary.Consumes == nil {
-		primary.Consumes = make([]string, 0, 10)
-	}
-
-	if primary.Tags == nil {
-		primary.Tags = make([]spec.Tag, 0, 10)
-	}
-
-	if primary.Schemes == nil {
-		primary.Schemes = make([]string, 0, 10)
-	}
-
-	if primary.Paths == nil {
-		primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
-	}
-
-	if primary.Paths.Paths == nil {
-		primary.Paths.Paths = make(map[string]spec.PathItem)
-	}
-
-	if primary.Definitions == nil {
-		primary.Definitions = make(spec.Definitions)
-	}
-
-	if primary.Parameters == nil {
-		primary.Parameters = make(map[string]spec.Parameter)
-	}
-
-	if primary.Responses == nil {
-		primary.Responses = make(map[string]spec.Response)
-	}
-}
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
deleted file mode 100644
index ab190db5..00000000
--- a/vendor/github.com/go-openapi/analysis/schema.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package analysis
-
-import (
-	"errors"
-
-	"github.com/go-openapi/spec"
-	"github.com/go-openapi/strfmt"
-)
-
-// SchemaOpts configures the schema analyzer
-type SchemaOpts struct { - Schema *spec.Schema - Root interface{} - BasePath string - _ struct{} -} - -// Schema analysis, will classify the schema according to known -// patterns. -func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { - if opts.Schema == nil { - return nil, errors.New("no schema to analyze") - } - - a := &AnalyzedSchema{ - schema: opts.Schema, - root: opts.Root, - basePath: opts.BasePath, - } - - a.initializeFlags() - a.inferKnownType() - a.inferEnum() - a.inferBaseType() - - if err := a.inferMap(); err != nil { - return nil, err - } - if err := a.inferArray(); err != nil { - return nil, err - } - - a.inferTuple() - - if err := a.inferFromRef(); err != nil { - return nil, err - } - - a.inferSimpleSchema() - - return a, nil -} - -// AnalyzedSchema indicates what the schema represents -type AnalyzedSchema struct { - schema *spec.Schema - root interface{} - basePath string - - hasProps bool - hasAllOf bool - hasItems bool - hasAdditionalProps bool - hasAdditionalItems bool - hasRef bool - - IsKnownType bool - IsSimpleSchema bool - IsArray bool - IsSimpleArray bool - IsMap bool - IsSimpleMap bool - IsExtendedObject bool - IsTuple bool - IsTupleWithExtra bool - IsBaseType bool - IsEnum bool -} - -// Inherits copies value fields from other onto this schema -func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { - if other == nil { - return - } - a.hasProps = other.hasProps - a.hasAllOf = other.hasAllOf - a.hasItems = other.hasItems - a.hasAdditionalItems = other.hasAdditionalItems - a.hasAdditionalProps = other.hasAdditionalProps - a.hasRef = other.hasRef - - a.IsKnownType = other.IsKnownType - a.IsSimpleSchema = other.IsSimpleSchema - a.IsArray = other.IsArray - a.IsSimpleArray = other.IsSimpleArray - a.IsMap = other.IsMap - a.IsSimpleMap = other.IsSimpleMap - a.IsExtendedObject = other.IsExtendedObject - a.IsTuple = other.IsTuple - a.IsTupleWithExtra = other.IsTupleWithExtra - a.IsBaseType = other.IsBaseType - a.IsEnum = other.IsEnum -} - -func (a *AnalyzedSchema) inferFromRef() error { - if a.hasRef { - sch := new(spec.Schema) - sch.Ref = a.schema.Ref - err := spec.ExpandSchema(sch, a.root, nil) - if err != nil { - return err - } - rsch, err := Schema(SchemaOpts{ - Schema: sch, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - // NOTE(fredbi): currently the only cause for errors is - // unresolved ref. Since spec.ExpandSchema() expands the - // schema recursively, there is no chance to get there, - // until we add more causes for error in this schema analysis. 
- return err - } - a.inherits(rsch) - } - - return nil -} - -func (a *AnalyzedSchema) inferSimpleSchema() { - a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap -} - -func (a *AnalyzedSchema) inferKnownType() { - tpe := a.schema.Type - format := a.schema.Format - a.IsKnownType = tpe.Contains("boolean") || - tpe.Contains("integer") || - tpe.Contains("number") || - tpe.Contains("string") || - (format != "" && strfmt.Default.ContainsName(format)) || - (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) -} - -func (a *AnalyzedSchema) inferMap() error { - if !a.isObjectType() { - return nil - } - - hasExtra := a.hasProps || a.hasAllOf - a.IsMap = a.hasAdditionalProps && !hasExtra - a.IsExtendedObject = a.hasAdditionalProps && hasExtra - - if !a.IsMap { - return nil - } - - // maps - if a.schema.AdditionalProperties.Schema != nil { - msch, err := Schema(SchemaOpts{ - Schema: a.schema.AdditionalProperties.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - a.IsSimpleMap = msch.IsSimpleSchema - } else if a.schema.AdditionalProperties.Allows { - a.IsSimpleMap = true - } - - return nil -} - -func (a *AnalyzedSchema) inferArray() error { - // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple - // (yes, even if the Items array contains only one element). - // arrays in JSON schema may be unrestricted (i.e no Items specified). - // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. - // - // NOTE: the spec package misses the distinction between: - // items: [] and items: {}, so we consider both arrays here. - a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) - if a.IsArray && a.hasItems { - if a.schema.Items.Schema != nil { - itsch, err := Schema(SchemaOpts{ - Schema: a.schema.Items.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - - a.IsSimpleArray = itsch.IsSimpleSchema - } - } - - if a.IsArray && !a.hasItems { - a.IsSimpleArray = true - } - - return nil -} - -func (a *AnalyzedSchema) inferTuple() { - tuple := a.hasItems && a.schema.Items.Schemas != nil - a.IsTuple = tuple && !a.hasAdditionalItems - a.IsTupleWithExtra = tuple && a.hasAdditionalItems -} - -func (a *AnalyzedSchema) inferBaseType() { - if a.isObjectType() { - a.IsBaseType = a.schema.Discriminator != "" - } -} - -func (a *AnalyzedSchema) inferEnum() { - a.IsEnum = len(a.schema.Enum) > 0 -} - -func (a *AnalyzedSchema) initializeFlags() { - a.hasProps = len(a.schema.Properties) > 0 - a.hasAllOf = len(a.schema.AllOf) > 0 - a.hasRef = a.schema.Ref.String() != "" - - a.hasItems = a.schema.Items != nil && - (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) - - a.hasAdditionalProps = a.schema.AdditionalProperties != nil && - (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows) - - a.hasAdditionalItems = a.schema.AdditionalItems != nil && - (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) -} - -func (a *AnalyzedSchema) isObjectType() bool { - return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) -} - -func (a *AnalyzedSchema) isArrayType() bool { - return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) -} - -// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
-// -// Complex means the schema is any of: -// - a simple type (primitive) -// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) -// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will -// generate a definition) -func (a *AnalyzedSchema) isAnalyzedAsComplex() bool { - return !a.IsSimpleSchema && !a.IsArray && !a.IsMap -} diff --git a/vendor/github.com/go-openapi/errors/.gitattributes b/vendor/github.com/go-openapi/errors/.gitattributes deleted file mode 100644 index a0717e4b..00000000 --- a/vendor/github.com/go-openapi/errors/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore deleted file mode 100644 index dd91ed6a..00000000 --- a/vendor/github.com/go-openapi/errors/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml deleted file mode 100644 index cf88ead3..00000000 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ /dev/null @@ -1,62 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - errname # this repo doesn't follow the convention advised by this linter - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md deleted file mode 100644 index 6d57ea55..00000000 --- a/vendor/github.com/go-openapi/errors/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# OpenAPI errors [![Build Status](https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) - -Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go deleted file mode 100644 index 5320cb96..00000000 --- a/vendor/github.com/go-openapi/errors/api.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" - "reflect" - "strings" -) - -// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
-var DefaultHTTPCode = http.StatusUnprocessableEntity
-
-// Error represents an error interface that all swagger framework errors implement
-type Error interface {
-	error
-	Code() int32
-}
-
-type apiError struct {
-	code    int32
-	message string
-}
-
-func (a *apiError) Error() string {
-	return a.message
-}
-
-func (a *apiError) Code() int32 {
-	return a.code
-}
-
-// MarshalJSON implements the JSON encoding interface
-func (a apiError) MarshalJSON() ([]byte, error) {
-	return json.Marshal(map[string]interface{}{
-		"code":    a.code,
-		"message": a.message,
-	})
-}
-
-// New creates a new API error with a code and a message
-func New(code int32, message string, args ...interface{}) Error {
-	if len(args) > 0 {
-		return &apiError{
-			code:    code,
-			message: fmt.Sprintf(message, args...),
-		}
-	}
-	return &apiError{
-		code:    code,
-		message: message,
-	}
-}
-
-// NotFound creates a new not found error
-func NotFound(message string, args ...interface{}) Error {
-	if message == "" {
-		message = "Not found"
-	}
-	return New(http.StatusNotFound, fmt.Sprintf(message, args...))
-}
-
-// NotImplemented creates a new not implemented error
-func NotImplemented(message string) Error {
-	return New(http.StatusNotImplemented, message)
-}
-
-// MethodNotAllowedError represents an error for when the path matches but the method doesn't
-type MethodNotAllowedError struct {
-	code    int32
-	Allowed []string
-	message string
-}
-
-func (m *MethodNotAllowedError) Error() string {
-	return m.message
-}
-
-// Code the error code
-func (m *MethodNotAllowedError) Code() int32 {
-	return m.code
-}
-
-// MarshalJSON implements the JSON encoding interface
-func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) {
-	return json.Marshal(map[string]interface{}{
-		"code":    m.code,
-		"message": m.message,
-		"allowed": m.Allowed,
-	})
-}
-
-func errorAsJSON(err Error) []byte {
-	//nolint:errchkjson
-	b, _ := json.Marshal(struct {
-		Code    int32  `json:"code"`
-		Message string `json:"message"`
-	}{err.Code(), err.Error()})
-	return b
-}
-
-func flattenComposite(errs *CompositeError) *CompositeError {
-	var res []error
-	for _, er := range errs.Errors {
-		switch e := er.(type) {
-		case *CompositeError:
-			if e != nil && len(e.Errors) > 0 {
-				flat := flattenComposite(e)
-				if len(flat.Errors) > 0 {
-					res = append(res, flat.Errors...)
-				}
-			}
-		default:
-			if e != nil {
-				res = append(res, e)
-			}
-		}
-	}
-	return CompositeValidationError(res...)
-} - -// MethodNotAllowed creates a new method not allowed error -func MethodNotAllowed(requested string, allow []string) Error { - msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) - return &MethodNotAllowedError{ - code: http.StatusMethodNotAllowed, - Allowed: allow, - message: msg, - } -} - -// ServeError implements the http error handler interface -func ServeError(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Set("Content-Type", "application/json") - switch e := err.(type) { - case *CompositeError: - er := flattenComposite(e) - // strips composite errors to first element only - if len(er.Errors) > 0 { - ServeError(rw, r, er.Errors[0]) - } else { - // guard against empty CompositeError (invalid construct) - ServeError(rw, r, nil) - } - case *MethodNotAllowedError: - rw.Header().Add("Allow", strings.Join(e.Allowed, ",")) - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case Error: - value := reflect.ValueOf(e) - if value.Kind() == reflect.Ptr && value.IsNil() { - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - return - } - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case nil: - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - default: - rw.WriteHeader(http.StatusInternalServerError) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) - } - } -} - -func asHTTPCode(input int) int { - if input >= 600 { - return DefaultHTTPCode - } - return input -} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go deleted file mode 100644 index 0545b501..00000000 --- a/vendor/github.com/go-openapi/errors/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import "net/http" - -// Unauthenticated returns an unauthenticated error -func Unauthenticated(scheme string) Error { - return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) -} diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go deleted file mode 100644 index af01190c..00000000 --- a/vendor/github.com/go-openapi/errors/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package errors provides an Error interface and several concrete types -implementing this interface to manage API errors and JSON-schema validation -errors. - -A middleware handler ServeError() is provided to serve the errors types -it defines. - -It is used throughout the various go-openapi toolkit libraries -(https://github.com/go-openapi). -*/ -package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go deleted file mode 100644 index dfebe8f9..00000000 --- a/vendor/github.com/go-openapi/errors/headers.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" -) - -// Validation represents a failure of a precondition -type Validation struct { - code int32 - Name string - In string - Value interface{} - message string - Values []interface{} -} - -func (e *Validation) Error() string { - return e.message -} - -// Code the error code -func (e *Validation) Code() int32 { - return e.code -} - -// MarshalJSON implements the JSON encoding interface -func (e Validation) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "code": e.code, - "message": e.message, - "in": e.In, - "name": e.Name, - "value": e.Value, - "values": e.Values, - }) -} - -// ValidateName sets the name for a validation or updates it for a nested property -func (e *Validation) ValidateName(name string) *Validation { - if name != "" { - if e.Name == "" { - e.Name = name - e.message = name + e.message - } else { - e.Name = name + "." + e.Name - e.message = name + "." 
+ e.message - } - } - return e -} - -const ( - contentTypeFail = `unsupported media type %q, only %v are allowed` - responseFormatFail = `unsupported media type requested, only %v are available` -) - -// InvalidContentType error for an invalid content type -func InvalidContentType(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusUnsupportedMediaType, - Name: "Content-Type", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(contentTypeFail, value, allowed), - } -} - -// InvalidResponseFormat error for an unacceptable response format request -func InvalidResponseFormat(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusNotAcceptable, - Name: "Accept", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(responseFormatFail, allowed), - } -} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go deleted file mode 100644 index 963472d1..00000000 --- a/vendor/github.com/go-openapi/errors/middleware.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "bytes" - "fmt" - "strings" -) - -// APIVerificationFailed is an error that contains all the missing info for a mismatched section -// between the api registrations and the api spec -type APIVerificationFailed struct { - Section string `json:"section,omitempty"` - MissingSpecification []string `json:"missingSpecification,omitempty"` - MissingRegistration []string `json:"missingRegistration,omitempty"` -} - -func (v *APIVerificationFailed) Error() string { - buf := bytes.NewBuffer(nil) - - hasRegMissing := len(v.MissingRegistration) > 0 - hasSpecMissing := len(v.MissingSpecification) > 0 - - if hasRegMissing { - buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) - } - - if hasRegMissing && hasSpecMissing { - buf.WriteString("\n") - } - - if hasSpecMissing { - buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) - } - - return buf.String() -} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go deleted file mode 100644 index 5096e1ea..00000000 --- a/vendor/github.com/go-openapi/errors/parsing.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" -) - -// ParseError represents a parsing error -type ParseError struct { - code int32 - Name string - In string - Value string - Reason error - message string -} - -func (e *ParseError) Error() string { - return e.message -} - -// Code returns the http status code for this error -func (e *ParseError) Code() int32 { - return e.code -} - -// MarshalJSON implements the JSON encoding interface -func (e ParseError) MarshalJSON() ([]byte, error) { - var reason string - if e.Reason != nil { - reason = e.Reason.Error() - } - return json.Marshal(map[string]interface{}{ - "code": e.code, - "message": e.message, - "in": e.In, - "name": e.Name, - "value": e.Value, - "reason": reason, - }) -} - -const ( - parseErrorTemplContent = `parsing %s %s from %q failed, because %s` - parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` -) - -// NewParseError creates a new parse error -func NewParseError(name, in, value string, reason error) *ParseError { - var msg string - if in == "" { - msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) - } else { - msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) - } - return &ParseError{ - code: 400, - Name: name, - In: in, - Value: value, - Reason: reason, - message: msg, - } -} diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go deleted file mode 100644 index cf7ac2ed..00000000 --- a/vendor/github.com/go-openapi/errors/schema.go +++ /dev/null @@ -1,615 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package errors - -import ( - "encoding/json" - "fmt" - "strings" -) - -const ( - invalidType = "%s is an invalid type name" - typeFail = "%s in %s must be of type %s" - typeFailWithData = "%s in %s must be of type %s: %q" - typeFailWithError = "%s in %s must be of type %s, because: %s" - requiredFail = "%s in %s is required" - readOnlyFail = "%s in %s is readOnly" - tooLongMessage = "%s in %s should be at most %d chars long" - tooShortMessage = "%s in %s should be at least %d chars long" - patternFail = "%s in %s should match '%s'" - enumFail = "%s in %s should be one of %v" - multipleOfFail = "%s in %s should be a multiple of %v" - maxIncFail = "%s in %s should be less than or equal to %v" - maxExcFail = "%s in %s should be less than %v" - minIncFail = "%s in %s should be greater than or equal to %v" - minExcFail = "%s in %s should be greater than %v" - uniqueFail = "%s in %s shouldn't contain duplicates" - maxItemsFail = "%s in %s should have at most %d items" - minItemsFail = "%s in %s should have at least %d items" - typeFailNoIn = "%s must be of type %s" - typeFailWithDataNoIn = "%s must be of type %s: %q" - typeFailWithErrorNoIn = "%s must be of type %s, because: %s" - requiredFailNoIn = "%s is required" - readOnlyFailNoIn = "%s is readOnly" - tooLongMessageNoIn = "%s should be at most %d chars long" - tooShortMessageNoIn = "%s should be at least %d chars long" - patternFailNoIn = "%s should match '%s'" - enumFailNoIn = "%s should be one of %v" - multipleOfFailNoIn = "%s should be a multiple of %v" - maxIncFailNoIn = "%s should be less than or equal to %v" - maxExcFailNoIn = "%s should be less than %v" - minIncFailNoIn = "%s should be greater than or equal to %v" - minExcFailNoIn = "%s should be greater than %v" - uniqueFailNoIn = "%s shouldn't contain duplicates" - maxItemsFailNoIn = "%s should have at most %d items" - minItemsFailNoIn = "%s should have at least %d items" - noAdditionalItems = "%s in %s can't have additional items" - noAdditionalItemsNoIn = "%s can't have additional items" - tooFewProperties = "%s in %s should have at least %d properties" - tooFewPropertiesNoIn = "%s should have at least %d properties" - tooManyProperties = "%s in %s should have at most %d properties" - tooManyPropertiesNoIn = "%s should have at most %d properties" - unallowedProperty = "%s.%s in %s is a forbidden property" - unallowedPropertyNoIn = "%s.%s is a forbidden property" - failedAllPatternProps = "%s.%s in %s failed all pattern properties" - failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" - multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" -) - -// All code responses can be used to differentiate errors for different handling -// by the consuming program -const ( - // CompositeErrorCode remains 422 for backwards-compatibility - // and to separate it from validation errors with cause - CompositeErrorCode = 422 - // InvalidTypeCode is used for any subclass of invalid types - InvalidTypeCode = 600 + iota - RequiredFailCode - TooLongFailCode - TooShortFailCode - PatternFailCode - EnumFailCode - MultipleOfFailCode - MaxFailCode - MinFailCode - UniqueFailCode - MaxItemsFailCode - MinItemsFailCode - NoAdditionalItemsCode - TooFewPropertiesCode - TooManyPropertiesCode - UnallowedPropertyCode - FailedAllPatternPropsCode - MultipleOfMustBePositiveCode - ReadOnlyFailCode -) - -// CompositeError is an error that groups several errors together -type CompositeError struct { - Errors []error - code int32 - message string -} - -// Code for this error 
-func (c *CompositeError) Code() int32 { - return c.code -} - -func (c *CompositeError) Error() string { - if len(c.Errors) > 0 { - msgs := []string{c.message + ":"} - for _, e := range c.Errors { - msgs = append(msgs, e.Error()) - } - return strings.Join(msgs, "\n") - } - return c.message -} - -func (c *CompositeError) Unwrap() []error { - return c.Errors -} - -// MarshalJSON implements the JSON encoding interface -func (c CompositeError) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "code": c.code, - "message": c.message, - "errors": c.Errors, - }) -} - -// CompositeValidationError an error to wrap a bunch of other errors -func CompositeValidationError(errors ...error) *CompositeError { - return &CompositeError{ - code: CompositeErrorCode, - Errors: append(make([]error, 0, len(errors)), errors...), - message: "validation failure list", - } -} - -// ValidateName recursively sets the name for all validations or updates them for nested properties -func (c *CompositeError) ValidateName(name string) *CompositeError { - for i, e := range c.Errors { - if ve, ok := e.(*Validation); ok { - c.Errors[i] = ve.ValidateName(name) - } else if ce, ok := e.(*CompositeError); ok { - c.Errors[i] = ce.ValidateName(name) - } - } - - return c -} - -// FailedAllPatternProperties an error for when the property doesn't match a pattern -func FailedAllPatternProperties(name, in, key string) *Validation { - msg := fmt.Sprintf(failedAllPatternProps, name, key, in) - if in == "" { - msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key) - } - return &Validation{ - code: FailedAllPatternPropsCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// PropertyNotAllowed an error for when the property doesn't match a pattern -func PropertyNotAllowed(name, in, key string) *Validation { - msg := fmt.Sprintf(unallowedProperty, name, key, in) - if in == "" { - msg = fmt.Sprintf(unallowedPropertyNoIn, name, key) - } - return &Validation{ - code: UnallowedPropertyCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// TooFewProperties an error for an object with too few properties -func TooFewProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooFewProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n) - } - return &Validation{ - code: TooFewPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// TooManyProperties an error for an object with too many properties -func TooManyProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooManyProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n) - } - return &Validation{ - code: TooManyPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// AdditionalItemsNotAllowed an error for invalid additional items -func AdditionalItemsNotAllowed(name, in string) *Validation { - msg := fmt.Sprintf(noAdditionalItems, name, in) - if in == "" { - msg = fmt.Sprintf(noAdditionalItemsNoIn, name) - } - return &Validation{ - code: NoAdditionalItemsCode, - Name: name, - In: in, - message: msg, - } -} - -// InvalidCollectionFormat another flavor of invalid type error -func InvalidCollectionFormat(name, in, format string) *Validation { - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: format, - message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name), - } -} - -// InvalidTypeName an error for when the type 
is invalid -func InvalidTypeName(typeName string) *Validation { - return &Validation{ - code: InvalidTypeCode, - Value: typeName, - message: fmt.Sprintf(invalidType, typeName), - } -} - -// InvalidType creates an error for when the type is invalid -func InvalidType(name, in, typeName string, value interface{}) *Validation { - var message string - - if in != "" { - switch value.(type) { - case string: - message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) - default: - message = fmt.Sprintf(typeFail, name, in, typeName) - } - } else { - switch value.(type) { - case string: - message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) - default: - message = fmt.Sprintf(typeFailNoIn, name, typeName) - } - } - - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: value, - message: message, - } - -} - -// DuplicateItems error for when an array contains duplicates -func DuplicateItems(name, in string) *Validation { - msg := fmt.Sprintf(uniqueFail, name, in) - if in == "" { - msg = fmt.Sprintf(uniqueFailNoIn, name) - } - return &Validation{ - code: UniqueFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooManyItems error for when an array contains too many items -func TooManyItems(name, in string, max int64, value interface{}) *Validation { - msg := fmt.Sprintf(maxItemsFail, name, in, max) - if in == "" { - msg = fmt.Sprintf(maxItemsFailNoIn, name, max) - } - - return &Validation{ - code: MaxItemsFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooFewItems error for when an array contains too few items -func TooFewItems(name, in string, min int64, value interface{}) *Validation { - msg := fmt.Sprintf(minItemsFail, name, in, min) - if in == "" { - msg = fmt.Sprintf(minItemsFailNoIn, name, min) - } - return &Validation{ - code: MinItemsFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// ExceedsMaximumInt error for when maximum validation fails -func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMaximumUint error for when maximum validation fails -func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMaximum error for when maximum validation fails -func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) 
- } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimumInt error for when minimum validation fails -func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimumUint error for when minimum validation fails -func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimum error for when minimum validation fails -func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// NotMultipleOf error for when multiple of validation fails -func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) - } else { - msg = fmt.Sprintf(multipleOfFail, name, in, multiple) - } - return &Validation{ - code: MultipleOfFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// EnumFail error for when an enum validation fails -func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(enumFailNoIn, name, values) - } else { - msg = fmt.Sprintf(enumFail, name, in, values) - } - - return &Validation{ - code: EnumFailCode, - Name: name, - In: in, - Value: value, - Values: values, - message: msg, - } -} - -// Required error for when a value is missing -func Required(name, in string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(requiredFailNoIn, name) - } else { - msg = fmt.Sprintf(requiredFail, name, in) - } - return &Validation{ - code: RequiredFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// ReadOnly error for when a value is present in request -func ReadOnly(name, in string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(readOnlyFailNoIn, name) - } else { - msg = fmt.Sprintf(readOnlyFail, name, in) - } - return &Validation{ - code: ReadOnlyFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooLong error for when a string is too long -func TooLong(name, in string, max int64, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooLongMessageNoIn, name, max) - } else { - msg = fmt.Sprintf(tooLongMessage, name, in, max) - } - return &Validation{ 
- code: TooLongFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooShort error for when a string is too short -func TooShort(name, in string, min int64, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooShortMessageNoIn, name, min) - } else { - msg = fmt.Sprintf(tooShortMessage, name, in, min) - } - - return &Validation{ - code: TooShortFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// FailedPattern error for when a string fails a regex pattern match -// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. -func FailedPattern(name, in, pattern string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(patternFailNoIn, name, pattern) - } else { - msg = fmt.Sprintf(patternFail, name, in, pattern) - } - - return &Validation{ - code: PatternFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// MultipleOfMustBePositive error for when a -// multipleOf factor is negative -func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { - return &Validation{ - code: MultipleOfMustBePositiveCode, - Name: name, - In: in, - Value: factor, - message: fmt.Sprintf(multipleOfMustBePositive, name, factor), - } -} diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore deleted file mode 100644 index 769c2440..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml deleted file mode 100644 index 22f8d21c..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ /dev/null @@ -1,61 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase 
diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md deleted file mode 100644 index 0108f1d5..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) - -An implementation of JSON Pointer - Go language - -## Status -Completed YES - -Tested YES - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go deleted file mode 100644 index d970c7cf..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 - -package jsonpointer - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/go-openapi/swag" -) - -const ( - emptyPointer = `` - pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator - notFound = `Can't find the pointer in the document` -) - -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() -var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() - -// JSONPointable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONPointable interface { - JSONLookup(string) (any, error) -} - -// JSONSetable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONSetable interface { - JSONSet(string, any) error -} - -// New creates a new json pointer for the given string -func New(jsonPointerString string) (Pointer, error) { - - var p Pointer - err := p.parse(jsonPointerString) - return p, err - -} - -// Pointer the json pointer representation -type Pointer struct { - referenceTokens []string -} - -// "Constructor", parses the given string JSON pointer -func (p *Pointer) parse(jsonPointerString string) error { - - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
- } - } - - return err -} - -// Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document any) (any, reflect.Kind, error) { - return p.get(document, swag.DefaultJSONNameProvider) -} - -// Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document any, value any) (any, error) { - return document, p.set(document, value, swag.DefaultJSONNameProvider) -} - -// GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { - return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) -} - -// SetForToken sets a value for a json pointer token 1 level deep -func SetForToken(document any, decodedToken string, value any) (any, error) { - return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) -} - -func isNil(input any) bool { - if input == nil { - return true - } - - kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: - return reflect.ValueOf(input).IsNil() - default: - return false - } -} - -func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - if isNil(node) { - return nil, kind, fmt.Errorf("nil value has no field %q", decodedToken) - } - - switch typed := node.(type) { - case JSONPointable: - r, err := typed.JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect - return getSingleImpl(*typed, decodedToken, nameProvider) - } - - switch kind { //nolint:exhaustive - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - return fld.Interface(), kind, nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if mv.IsValid() { - return mv.Interface(), kind, nil - } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return nil, kind, err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - return elem.Interface(), kind, nil - - default: - return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { - rValue := reflect.Indirect(reflect.ValueOf(node)) - - if ns, ok := node.(JSONSetable); ok { // pointer impl - return ns.JSONSet(decodedToken, data) - } - - if rValue.Type().Implements(jsonSetableType) { - return node.(JSONSetable).JSONSet(decodedToken, data) - } - - switch rValue.Kind() { //nolint:exhaustive - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.IsValid() { - fld.Set(reflect.ValueOf(data)) - } - return nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - rValue.SetMapIndex(kv, reflect.ValueOf(data)) -
return nil - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if !elem.CanSet() { - return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) - } - elem.Set(reflect.ValueOf(data)) - return nil - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - return node, kind, nil - } - - for _, token := range p.referenceTokens { - - decodedToken := Unescape(token) - - r, knd, err := getSingleImpl(node, decodedToken, nameProvider) - if err != nil { - return nil, knd, err - } - node = r - } - - rValue := reflect.ValueOf(node) - kind = rValue.Kind() - - return node, kind, nil -} - -func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { - knd := reflect.ValueOf(node).Kind() - - if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return errors.New("only structs, pointers, maps and slices are supported for setting values") - } - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - // Full document when empty - if len(p.referenceTokens) == 0 { - return nil - } - - lastI := len(p.referenceTokens) - 1 - for i, token := range p.referenceTokens { - isLastToken := i == lastI - decodedToken := Unescape(token) - - if isLastToken { - - return setSingleImpl(node, data, decodedToken, nameProvider) - } - - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return err - } - fld := reflect.ValueOf(r) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = r - continue - } - - switch kind { //nolint:exhaustive - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = fld.Interface() - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if !mv.IsValid() { - return fmt.Errorf("object has no key %q", decodedToken) - } - if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { - node = mv.Addr().Interface() - continue - } - node = mv.Interface() - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if elem.CanAddr() && elem.Kind() != 
reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { - node = elem.Addr().Interface() - continue - } - node = elem.Interface() - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - - } - - return nil -} - -// DecodedTokens returns the decoded tokens -func (p *Pointer) DecodedTokens() []string { - result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) - } - return result -} - -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document -func (p *Pointer) IsEmpty() bool { - return len(p.referenceTokens) == 0 -} - -// Pointer to string representation function -func (p *Pointer) String() string { - - if len(p.referenceTokens) == 0 { - return emptyPointer - } - - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString -} - -func (p *Pointer) Offset(document string) (int64, error) { - dec := json.NewDecoder(strings.NewReader(document)) - var offset int64 - for _, ttk := range p.DecodedTokens() { - tk, err := dec.Token() - if err != nil { - return 0, err - } - switch tk := tk.(type) { - case json.Delim: - switch tk { - case '{': - offset, err = offsetSingleObject(dec, ttk) - if err != nil { - return 0, err - } - case '[': - offset, err = offsetSingleArray(dec, ttk) - if err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) - } - } - return offset, nil -} - -func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { - for dec.More() { - offset := dec.InputOffset() - tk, err := dec.Token() - if err != nil { - return 0, err - } - switch tk := tk.(type) { - case json.Delim: - switch tk { - case '{': - if err = drainSingle(dec); err != nil { - return 0, err - } - case '[': - if err = drainSingle(dec); err != nil { - return 0, err - } - } - case string: - if tk == decodedToken { - return offset, nil - } - default: - return 0, fmt.Errorf("invalid token %#v", tk) - } - } - return 0, fmt.Errorf("token reference %q not found", decodedToken) -} - -func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { - idx, err := strconv.Atoi(decodedToken) - if err != nil { - return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) - } - var i int - for i = 0; i < idx && dec.More(); i++ { - tk, err := dec.Token() - if err != nil { - return 0, err - } - - if delim, isDelim := tk.(json.Delim); isDelim { - switch delim { - case '{': - if err = drainSingle(dec); err != nil { - return 0, err - } - case '[': - if err = drainSingle(dec); err != nil { - return 0, err - } - } - } - } - - if !dec.More() { - return 0, fmt.Errorf("token reference %q not found", decodedToken) - } - return dec.InputOffset(), nil -} - -// drainSingle drains a single level of object or array. -// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. 
-func drainSingle(dec *json.Decoder) error { - for dec.More() { - tk, err := dec.Token() - if err != nil { - return err - } - if delim, isDelim := tk.(json.Delim); isDelim { - switch delim { - case '{': - if err = drainSingle(dec); err != nil { - return err - } - case '[': - if err = drainSingle(dec); err != nil { - return err - } - } - } - } - - // Consumes the ending delim - if _, err := dec.Token(); err != nil { - return err - } - return nil -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... and vice versa - -const ( - encRefTok0 = `~0` - encRefTok1 = `~1` - decRefTok0 = `~` - decRefTok1 = `/` -) - -// Unescape unescapes a json pointer reference token string to the original representation -func Unescape(token string) string { - step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) - step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) - return step2 -} - -// Escape escapes a pointer reference token string -func Escape(token string) string { - step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) - step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) - return step2 -} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore deleted file mode 100644 index 769c2440..00000000 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml deleted file mode 100644 index 22f8d21c..00000000 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ /dev/null @@ -1,61 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/jsonreference/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
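Before the jsonreference files continue below, a short usage sketch may make the jsonpointer package deleted a few files earlier easier to follow. This is illustrative only, written against the API visible in the removed pointer.go (New, Get, Escape); the document literal is made up, and the output comments show what the removed code would produce.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// A made-up document; JSON usually decodes to map[string]interface{}.
	doc := map[string]interface{}{
		"definitions": map[string]interface{}{
			"a~b": map[string]interface{}{"type": "string"},
		},
	}

	// "~" must be escaped as "~0" inside a reference token (and "/" as "~1"),
	// which is exactly what the removed Escape/Unescape helpers implement.
	ptr, err := jsonpointer.New("/definitions/" + jsonpointer.Escape("a~b"))
	if err != nil {
		panic(err)
	}

	// Get walks the document token by token, as in the removed (*Pointer).get.
	v, kind, err := ptr.Get(doc)
	fmt.Println(v, kind, err) // map[type:string] map <nil>
}
```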
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md deleted file mode 100644 index c7fc2049..00000000 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) - -An implementation of JSON Reference - Go language - -## Status -Feature complete. Stable API - -## Dependencies -* https://github.com/go-openapi/jsonpointer - -## References - -* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go deleted file mode 100644 index f0610cf1..00000000 --- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go +++ /dev/null @@ -1,69 +0,0 @@ -package internal - -import ( - "net/url" - "regexp" - "strings" -) - -const ( - defaultHTTPPort = ":80" - defaultHTTPSPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) - -// NormalizeURL will normalize the specified URL -// This was added to replace a previous call to the no longer maintained purell library: -// The call that was used looked like the following: -// -// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) -// -// To explain all that was included in the call above, purell.FlagsSafe was really just the following: -// - FlagLowercaseScheme -// - FlagLowercaseHost -// - FlagRemoveDefaultPort -// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) -// -// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. 
-func NormalizeURL(u *url.URL) { - lowercaseScheme(u) - lowercaseHost(u) - removeDefaultPort(u) - removeDuplicateSlashes(u) - - u.RawPath = "" - u.RawFragment = "" -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { - return "" - } - return val - }) - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go deleted file mode 100644 index cfdef03e..00000000 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 - -package jsonreference - -import ( - "errors" - "net/url" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/jsonreference/internal" -) - -const ( - fragmentRune = `#` -) - -// New creates a new reference for the given string -func New(jsonReferenceString string) (Ref, error) { - - var r Ref - err := r.parse(jsonReferenceString) - return r, err - -} - -// MustCreateRef parses the ref string and panics when it's invalid. 
-// Use the New method for a version that returns an error -func MustCreateRef(ref string) Ref { - r, err := New(ref) - if err != nil { - panic(err) - } - return r -} - -// Ref represents a json reference object -type Ref struct { - referenceURL *url.URL - referencePointer jsonpointer.Pointer - - HasFullURL bool - HasURLPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -// GetURL gets the URL for this reference -func (r *Ref) GetURL() *url.URL { - return r.referenceURL -} - -// GetPointer gets the json pointer for this reference -func (r *Ref) GetPointer() *jsonpointer.Pointer { - return &r.referencePointer -} - -// String returns the best version of the url for this reference -func (r *Ref) String() string { - - if r.referenceURL != nil { - return r.referenceURL.String() - } - - if r.HasFragmentOnly { - return fragmentRune + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -// IsRoot returns true if this reference is a root document -func (r *Ref) IsRoot() bool { - return r.referenceURL != nil && - !r.IsCanonical() && - !r.HasURLPathOnly && - r.referenceURL.Fragment == "" -} - -// IsCanonical returns true when this pointer starts with http(s):// or file:// -func (r *Ref) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) -} - -// "Constructor", parses the given string JSON reference -func (r *Ref) parse(jsonReferenceString string) error { - - parsed, err := url.Parse(jsonReferenceString) - if err != nil { - return err - } - - internal.NormalizeURL(parsed) - - r.referenceURL = parsed - refURL := r.referenceURL - - if refURL.Scheme != "" && refURL.Host != "" { - r.HasFullURL = true - } else { - if refURL.Path != "" { - r.HasURLPathOnly = true - } else if refURL.RawQuery == "" && refURL.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refURL.Scheme == "file" - r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") - - // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error - r.referencePointer, _ = jsonpointer.New(refURL.Fragment) - - return nil -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - childURL := child.GetURL() - parentURL := r.GetURL() - if childURL == nil { - return nil, errors.New("child url is nil") - } - if parentURL == nil { - return &child, nil - } - - ref, err := New(parentURL.ResolveReference(childURL).String()) - if err != nil { - return nil, err - } - return &ref, nil -} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/loads/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore deleted file mode 100644 index e4f15f17..00000000 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -secrets.yml -coverage.out -profile.cov -profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml deleted file mode 100644 index 22f8d21c..00000000 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ /dev/null @@ -1,61 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml deleted file mode 100644 index cd4a7c33..00000000 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.16.x -- 1.x -install: -- go get gotest.tools/gotestsum -language: go -arch: -- amd64 -- ppc64le -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -notifications: - slack: - secure: 
OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/loads/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index f8bd440d..00000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go deleted file mode 100644 index 5bcaef5d..00000000 --- a/vendor/github.com/go-openapi/loads/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package loads provides document loading methods for swagger (OAI) specifications. -// -// It is used by other go-openapi packages to load and run analysis on local or remote spec documents. -package loads diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go deleted file mode 100644 index b2d1e034..00000000 --- a/vendor/github.com/go-openapi/loads/loaders.go +++ /dev/null @@ -1,133 +0,0 @@ -package loads - -import ( - "encoding/json" - "errors" - "net/url" - - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -var ( - // Default chain of loaders, defined at the package level. - // - // By default this matches json and yaml documents. - // - // May be altered with AddLoader(). - loaders *loader -) - -func init() { - jsonLoader := &loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: func(_ string) bool { - return true - }, - Fn: JSONDoc, - }, - } - - loaders = jsonLoader.WithHead(&loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: swag.YAMLMatcher, - Fn: swag.YAMLDoc, - }, - }) - - // sets the global default loader for go-openapi/spec - spec.PathLoader = loaders.Load -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -// DocLoaderWithMatch describes a loading function for a given extension match. 
-type DocLoaderWithMatch struct { - Fn DocLoader - Match DocMatcher -} - -// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options -func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { - return DocLoaderWithMatch{ - Fn: fn, - Match: matcher, - } -} - -type loader struct { - DocLoaderWithMatch - Next *loader -} - -// WithHead adds a loader at the head of the current stack -func (l *loader) WithHead(head *loader) *loader { - if head == nil { - return l - } - head.Next = l - return head -} - -// WithNext adds a loader at the trail of the current stack -func (l *loader) WithNext(next *loader) *loader { - l.Next = next - return next -} - -// Load the raw document from path -func (l *loader) Load(path string) (json.RawMessage, error) { - _, erp := url.Parse(path) - if erp != nil { - return nil, erp - } - - lastErr := errors.New("no loader matched") // default error if no match was found - for ldr := l; ldr != nil; ldr = ldr.Next { - if ldr.Match != nil && !ldr.Match(path) { - continue - } - - // try then move to next one if there is an error - b, err := ldr.Fn(path) - if err == nil { - return b, nil - } - - lastErr = err - } - - return nil, lastErr -} - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// AddLoader for a document, executed before other previously set loaders. - // - // This sets the configuration at the package level. - // - // NOTE: - // - this updates the default loader used by github.com/go-openapi/spec - // - since this sets package level globals, you shouldn't call this concurrently -func AddLoader(predicate DocMatcher, load DocLoader) { - loaders = loaders.WithHead(&loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: predicate, - Fn: load, - }, - }) - - // sets the global default loader for go-openapi/spec - spec.PathLoader = loaders.Load -} diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go deleted file mode 100644 index f8305d56..00000000 --- a/vendor/github.com/go-openapi/loads/options.go +++ /dev/null @@ -1,61 +0,0 @@ -package loads - -type options struct { - loader *loader -} - -func defaultOptions() *options { - return &options{ - loader: loaders, - } -} - -func loaderFromOptions(options []LoaderOption) *loader { - opts := defaultOptions() - for _, apply := range options { - apply(opts) - } - - return opts.loader -} - -// LoaderOption allows fine-tuning the spec loader behavior -type LoaderOption func(*options) - -// WithDocLoader sets a custom loader for loading specs -func WithDocLoader(l DocLoader) LoaderOption { - return func(opt *options) { - if l == nil { - return - } - opt.loader = &loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Fn: l, - }, - } - } -} - -// WithDocLoaderMatches sets a chain of custom loaders for loading specs -// for different extension matches. -// -// Loaders are executed in the order of provided DocLoaderWithMatch'es. 
-func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { - return func(opt *options) { - var final, prev *loader - for _, ldr := range l { - if ldr.Fn == nil { - continue - } - - if prev == nil { - final = &loader{DocLoaderWithMatch: ldr} - prev = final - continue - } - - prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr}) - } - opt.loader = final - } -} diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index c9039cd5..00000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package loads - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -func init() { - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - specFilePath string - origSpec *spec.Swagger - schema *spec.Schema - pathLoader *loader - raw json.RawMessage -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string, options ...LoaderOption) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - doc, err := Analyzed(data, "", options...) - if err != nil { - return nil, err - } - - doc.specFilePath = path - - return doc, nil -} - -// Embedded returns a Document based on embedded specs. No analysis is required -func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) { - var origSpec, flatSpec spec.Swagger - if err := json.Unmarshal(orig, &origSpec); err != nil { - return nil, err - } - if err := json.Unmarshal(flat, &flatSpec); err != nil { - return nil, err - } - return &Document{ - raw: orig, - origSpec: &origSpec, - spec: &flatSpec, - pathLoader: loaderFromOptions(options), - }, nil -} - -// Spec loads a new spec document from a local or remote path -func Spec(path string, options ...LoaderOption) (*Document, error) { - ldr := loaderFromOptions(options) - - b, err := ldr.Load(path) - if err != nil { - return nil, err - } - - document, err := Analyzed(b, "", options...) - if err != nil { - return nil, err - } - - document.specFilePath = path - document.pathLoader = ldr - - return document, nil -} - -// Analyzed creates a new analyzed spec document for a root json.RawMessage. 
-func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - raw, err := trimData(data) // trim blanks, then convert yaml docs into json - if err != nil { - return nil, err - } - - swspec := new(spec.Swagger) - if err = json.Unmarshal(raw, swspec); err != nil { - return nil, err - } - - origsqspec, err := cloneSpec(swspec) - if err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc - schema: spec.MustLoadSwagger20Schema(), - spec: swspec, - raw: raw, - origSpec: origsqspec, - pathLoader: loaderFromOptions(options), - } - - return d, nil -} - -func trimData(in json.RawMessage) (json.RawMessage, error) { - trimmed := bytes.TrimSpace(in) - if len(trimmed) == 0 { - return in, nil - } - - if trimmed[0] == '{' || trimmed[0] == '[' { - return trimmed, nil - } - - // assume yaml doc: convert it to json - yml, err := swag.BytesToYAMLDoc(trimmed) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - - d, err := swag.YAMLToJSON(yml) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - - return d, nil -} - -// Expanded expands the $ref fields in the spec document and returns a new spec document -func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - - var expandOptions *spec.ExpandOptions - if len(options) > 0 { - expandOptions = options[0] - if expandOptions.RelativeBase == "" { - expandOptions.RelativeBase = d.specFilePath - } - } else { - expandOptions = &spec.ExpandOptions{ - RelativeBase: d.specFilePath, - } - } - - if expandOptions.PathLoader == nil { - if d.pathLoader != nil { - // use loader from Document options - expandOptions.PathLoader = d.pathLoader.Load - } else { - // use package level loader - expandOptions.PathLoader = loaders.Load - } - } - - if err := spec.ExpandSpec(swspec, expandOptions); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - specFilePath: d.specFilePath, - schema: spec.MustLoadSwagger20Schema(), - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for the API specified by this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -// OrigSpec yields the original spec -func (d *Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset to the original spec -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// 
Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - raw, _ := json.Marshal(d.Spec()) - dd, _ := Analyzed(raw, d.Version()) - dd.pathLoader = d.pathLoader - dd.specFilePath = d.specFilePath - - return dd -} - -// SpecFilePath returns the file path of the spec if one is defined -func (d *Document) SpecFilePath() string { - return d.specFilePath -} - -func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { - var b bytes.Buffer - if err := gob.NewEncoder(&b).Encode(src); err != nil { - return nil, err - } - - var dst spec.Swagger - if err := gob.NewDecoder(&b).Decode(&dst); err != nil { - return nil, err - } - return &dst, nil -} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/runtime/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes deleted file mode 100644 index d207b180..00000000 --- a/vendor/github.com/go-openapi/runtime/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore deleted file mode 100644 index fea8b84e..00000000 --- a/vendor/github.com/go-openapi/runtime/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -*.cov -*.out -playground diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml deleted file mode 100644 index 1c75557b..00000000 --- a/vendor/github.com/go-openapi/runtime/.golangci.yml +++ /dev/null @@ -1,62 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - nilerr # nilerr crashes on this repo - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of 
Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/runtime/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md deleted file mode 100644 index b07e0ad9..00000000 --- a/vendor/github.com/go-openapi/runtime/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# runtime [![Build Status](https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/runtime.svg)](https://pkg.go.dev/github.com/go-openapi/runtime) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/runtime)](https://goreportcard.com/report/github.com/go-openapi/runtime) - -# go OpenAPI toolkit runtime - -The runtime component for use in code generation or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go deleted file mode 100644 index f8fb4822..00000000 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -func defaultCloser() error { return nil } - -type byteStreamOpt func(opts *byteStreamOpts) - -// ClosesStream when the bytestream consumer or producer is finished -func ClosesStream(opts *byteStreamOpts) { - opts.Close = true -} - -type byteStreamOpts struct { - Close bool -} - -// ByteStreamConsumer creates a consumer for byte streams. -// -// The consumer consumes from a provided reader into the data passed by reference. -// -// Supported output underlying types and interfaces, prioritized in this order: -// - io.ReaderFrom (for maximum control) -// - io.Writer (performs io.Copy) -// - encoding.BinaryUnmarshaler -// - *string -// - *[]byte -func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("ByteStreamConsumer requires a reader") // early exit - } - if data == nil { - return errors.New("nil destination for ByteStreamConsumer") - } - - closer := defaultCloser - if vals.Close { - if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { - closer = cl.Close - } - } - defer func() { - _ = closer() - }() - - if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom { - _, err := readerFrom.ReadFrom(reader) - return err - } - - if writer, isDataWriter := data.(io.Writer); isDataWriter { - _, err := io.Copy(writer, reader) - return err - } - - // buffers input before writing to data - var buf bytes.Buffer - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - switch destinationPointer := data.(type) { - case encoding.BinaryUnmarshaler: - return destinationPointer.UnmarshalBinary(b) - case *any: - switch (*destinationPointer).(type) { - case string: - *destinationPointer = string(b) - - return nil - - case []byte: - *destinationPointer = b - - return nil - } - default: - // check for the underlying type to be pointer to []byte or string, - if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { - return errors.New("destination must be a pointer") - } - - v := reflect.Indirect(reflect.ValueOf(data)) - t := v.Type() - - switch { - case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: - v.SetBytes(b) - return nil - - case t.Kind() == reflect.String: - v.SetString(string(b)) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", - data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") - }) -} - -// ByteStreamProducer creates a producer for byte streams. -// -// The producer takes input data then writes to an output writer (essentially as a pipe). -// -// Supported input underlying types and interfaces, prioritized in this order: -// - io.WriterTo (for maximum control) -// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting. 
-// - encoding.BinaryMarshaler -// - error (writes as a string) -// - []byte -// - string -// - struct, other slices: writes as JSON -func ByteStreamProducer(opts ...byteStreamOpt) Producer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("ByteStreamProducer requires a writer") // early exit - } - if data == nil { - return errors.New("nil data for ByteStreamProducer") - } - - closer := defaultCloser - if vals.Close { - if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { - closer = cl.Close - } - } - defer func() { - _ = closer() - }() - - if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { - defer rc.Close() - } - - switch origin := data.(type) { - case io.WriterTo: - _, err := origin.WriteTo(writer) - return err - - case io.Reader: - _, err := io.Copy(writer, origin) - return err - - case encoding.BinaryMarshaler: - bytes, err := origin.MarshalBinary() - if err != nil { - return err - } - - _, err = writer.Write(bytes) - return err - - case error: - _, err := writer.Write([]byte(origin.Error())) - return err - - default: - v := reflect.Indirect(reflect.ValueOf(data)) - t := v.Type() - - switch { - case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: - _, err := writer.Write(v.Bytes()) - return err - - case t.Kind() == reflect.String: - _, err := writer.Write([]byte(v.String())) - return err - - case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice: - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - - _, err = writer.Write(b) - return err - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", - data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") - }) -} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go deleted file mode 100644 index c6c97d9a..00000000 --- a/vendor/github.com/go-openapi/runtime/client_auth_info.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import "github.com/go-openapi/strfmt" - -// A ClientAuthInfoWriterFunc converts a function to a request writer interface -type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error - -// AuthenticateRequest adds authentication data to the request -func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// A ClientAuthInfoWriter implementor knows how to write authentication info to a request -type ClientAuthInfoWriter interface { - AuthenticateRequest(ClientRequest, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go deleted file mode 100644 index 5a5d6356..00000000 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "context" - "net/http" -) - -// ClientOperation represents the context for a swagger operation to be submitted to the transport -type ClientOperation struct { - ID string - Method string - PathPattern string - ProducesMediaTypes []string - ConsumesMediaTypes []string - Schemes []string - AuthInfo ClientAuthInfoWriter - Params ClientRequestWriter - Reader ClientResponseReader - Context context.Context //nolint:containedctx // we precisely want this type to contain the request context - Client *http.Client -} - -// A ClientTransport implementor knows how to submit Request objects to some destination -type ClientTransport interface { - // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) - Submit(*ClientOperation) (interface{}, error) -} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go deleted file mode 100644 index 4ebb2dea..00000000 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "io" - "net/http" - "net/url" - "time" - - "github.com/go-openapi/strfmt" -) - -// ClientRequestWriterFunc converts a function to a request writer interface -type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error - -// WriteToRequest adds data to the request -func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// ClientRequestWriter is an interface for things that know how to write to a request -type ClientRequestWriter interface { - WriteToRequest(ClientRequest, strfmt.Registry) error -} - -// ClientRequest is an interface for things that know how to -// add information to a swagger client request. -type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters - SetHeaderParam(string, ...string) error - - GetHeaderParams() http.Header - - SetQueryParam(string, ...string) error - - SetFormParam(string, ...string) error - - SetPathParam(string, string) error - - GetQueryParams() url.Values - - SetFileParam(string, ...NamedReadCloser) error - - SetBodyParam(interface{}) error - - SetTimeout(time.Duration) error - - GetMethod() string - - GetPath() string - - GetBody() []byte - - GetBodyParam() interface{} - - GetFileParam() map[string][]NamedReadCloser -} - -// NamedReadCloser represents a named ReadCloser interface -type NamedReadCloser interface { - io.ReadCloser - Name() string -} - -// NamedReader creates a NamedReadCloser for use as file upload -func NamedReader(name string, rdr io.Reader) NamedReadCloser { - rc, ok := rdr.(io.ReadCloser) - if !ok { - rc = io.NopCloser(rdr) - } - return &namedReadCloser{ - name: name, - cr: rc, - } -} - -type namedReadCloser struct { - name string - cr io.ReadCloser -} - -func (n *namedReadCloser) Close() error { - return n.cr.Close() -} -func (n *namedReadCloser) Read(p []byte) (int, error) { - return n.cr.Read(p) -} -func (n *namedReadCloser) Name() string { - return n.name -} - -type TestClientRequest struct { - Headers http.Header - Body interface{} -} - -func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { - if t.Headers == nil { - t.Headers = make(http.Header) - } - t.Headers.Set(name, values[0]) - return nil -} - -func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } - -func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } - -func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } - -func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } - -func (t *TestClientRequest) SetBodyParam(body interface{}) error { - t.Body = body - return nil -} - -func (t *TestClientRequest) SetTimeout(time.Duration) error { - return nil -} - -func (t *TestClientRequest) GetQueryParams() url.Values { return nil } - -func (t *TestClientRequest) GetMethod() string { return "" } - -func (t *TestClientRequest) GetPath() string { return "" } - -func (t *TestClientRequest) GetBody() []byte { return nil } - -func (t *TestClientRequest) GetBodyParam() interface{} { - return t.Body -} - -func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser { - return nil -} - -func (t *TestClientRequest) GetHeaderParams() http.Header { - return t.Headers -} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go deleted file mode 100644 index 0d169114..00000000 --- 
a/vendor/github.com/go-openapi/runtime/client_response.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/json" - "fmt" - "io" -) - -// A ClientResponse represents a client response -// This bridges between responses obtained from different transports -type ClientResponse interface { - Code() int - Message() string - GetHeader(string) string - GetHeaders(string) []string - Body() io.ReadCloser -} - -// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation -type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) - -// ReadResponse reads the response -func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { - return read(resp, consumer) -} - -// A ClientResponseReader is an interface for things that want to read a response. -// An application of this is to create structs from response values -type ClientResponseReader interface { - ReadResponse(ClientResponse, Consumer) (interface{}, error) -} - -// NewAPIError creates a new API error -func NewAPIError(opName string, payload interface{}, code int) *APIError { - return &APIError{ - OperationName: opName, - Response: payload, - Code: code, - } -} - -// APIError wraps an error model and captures the status code -type APIError struct { - OperationName string - Response interface{} - Code int -} - -func (o *APIError) Error() string { - var resp []byte - if err, ok := o.Response.(error); ok { - resp = []byte("'" + err.Error() + "'") - } else { - resp, _ = json.Marshal(o.Response) - } - return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) -} - -func (o *APIError) String() string { - return o.Error() -} - -// IsSuccess returns true when this response returns a 2xx status code -func (o *APIError) IsSuccess() bool { - return o.Code/100 == 2 -} - -// IsRedirect returns true when this response returns a 3xx status code -func (o *APIError) IsRedirect() bool { - return o.Code/100 == 3 -} - -// IsClientError returns true when this response returns a 4xx status code -func (o *APIError) IsClientError() bool { - return o.Code/100 == 4 -} - -// IsServerError returns true when this response returns a 5xx status code -func (o *APIError) IsServerError() bool { - return o.Code/100 == 5 -} - -// IsCode returns true when this response returns the given status code -func (o *APIError) IsCode(code int) bool { - return o.Code == code -} - -// A ClientResponseStatus is a common interface implemented by all responses in the generated code -// You can use this to treat any client response based on status code -type ClientResponseStatus interface { - IsSuccess() bool - IsRedirect() bool - IsClientError() bool - IsServerError() bool - IsCode(int) bool -} diff --git a/vendor/github.com/go-openapi/runtime/constants.go
b/vendor/github.com/go-openapi/runtime/constants.go deleted file mode 100644 index 51596924..00000000 --- a/vendor/github.com/go-openapi/runtime/constants.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -const ( - // HeaderContentType represents a http content-type header, its value is supposed to be a mime type - HeaderContentType = "Content-Type" - - // HeaderTransferEncoding represents a http transfer-encoding header. - HeaderTransferEncoding = "Transfer-Encoding" - - // HeaderAccept the Accept header - HeaderAccept = "Accept" - // HeaderAuthorization the Authorization header - HeaderAuthorization = "Authorization" - - charsetKey = "charset" - - // DefaultMime the default fallback mime type - DefaultMime = "application/octet-stream" - // JSONMime the json mime type - JSONMime = "application/json" - // YAMLMime the yaml mime type - YAMLMime = "application/x-yaml" - // XMLMime the xml mime type - XMLMime = "application/xml" - // TextMime the text mime type - TextMime = "text/plain" - // HTMLMime the html mime type - HTMLMime = "text/html" - // CSVMime the csv mime type - CSVMime = "text/csv" - // MultipartFormMime the multipart form mime type - MultipartFormMime = "multipart/form-data" - // URLencodedFormMime the url encoded form mime type - URLencodedFormMime = "application/x-www-form-urlencoded" -) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go deleted file mode 100644 index c9597bcd..00000000 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "context" - "encoding" - "encoding/csv" - "errors" - "fmt" - "io" - "reflect" - - "golang.org/x/sync/errgroup" -) - -// CSVConsumer creates a new CSV consumer. -// -// The consumer consumes CSV records from a provided reader into the data passed by reference. -// -// CSVOpt options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...). -// The defaults are those of the standard library's csv.Reader and csv.Writer.
-// -// Supported output underlying types and interfaces, prioritized in this order: -// - *csv.Writer -// - CSVWriter (writer options are ignored) -// - io.Writer (as raw bytes) -// - io.ReaderFrom (as raw bytes) -// - encoding.BinaryUnmarshaler (as raw bytes) -// - *[][]string (as a collection of records) -// - *[]byte (as raw bytes) -// - *string (as raw bytes) -// -// The consumer prioritizes situations where buffering the input is not required. -func CSVConsumer(opts ...CSVOpt) Consumer { - o := csvOptsWithDefaults(opts) - - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("CSVConsumer requires a reader") - } - if data == nil { - return errors.New("nil destination for CSVConsumer") - } - - csvReader := csv.NewReader(reader) - o.applyToReader(csvReader) - closer := defaultCloser - if o.closeStream { - if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { - closer = cl.Close - } - } - defer func() { - _ = closer() - }() - - switch destination := data.(type) { - case *csv.Writer: - csvWriter := destination - o.applyToWriter(csvWriter) - - return pipeCSV(csvWriter, csvReader, o) - - case CSVWriter: - csvWriter := destination - // no writer options available - - return pipeCSV(csvWriter, csvReader, o) - - case io.Writer: - csvWriter := csv.NewWriter(destination) - o.applyToWriter(csvWriter) - - return pipeCSV(csvWriter, csvReader, o) - - case io.ReaderFrom: - var buf bytes.Buffer - csvWriter := csv.NewWriter(&buf) - o.applyToWriter(csvWriter) - if err := bufferedCSV(csvWriter, csvReader, o); err != nil { - return err - } - _, err := destination.ReadFrom(&buf) - - return err - - case encoding.BinaryUnmarshaler: - var buf bytes.Buffer - csvWriter := csv.NewWriter(&buf) - o.applyToWriter(csvWriter) - if err := bufferedCSV(csvWriter, csvReader, o); err != nil { - return err - } - - return destination.UnmarshalBinary(buf.Bytes()) - - default: - // support *[][]string, *[]byte, *string - if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { - return errors.New("destination must be a pointer") - } - - v := reflect.Indirect(reflect.ValueOf(data)) - t := v.Type() - - switch { - case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: - csvWriter := &csvRecordsWriter{} - // writer options are ignored - if err := pipeCSV(csvWriter, csvReader, o); err != nil { - return err - } - - v.Grow(len(csvWriter.records)) - v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity - v.SetLen(len(csvWriter.records)) - reflect.Copy(v, reflect.ValueOf(csvWriter.records)) - - return nil - - case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: - var buf bytes.Buffer - csvWriter := csv.NewWriter(&buf) - o.applyToWriter(csvWriter) - if err := bufferedCSV(csvWriter, csvReader, o); err != nil { - return err - } - v.SetBytes(buf.Bytes()) - - return nil - - case t.Kind() == reflect.String: - var buf bytes.Buffer - csvWriter := csv.NewWriter(&buf) - o.applyToWriter(csvWriter) - if err := bufferedCSV(csvWriter, csvReader, o); err != nil { - return err - } - v.SetString(buf.String()) - - return nil - - default: - return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s", - data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface", - ) - } - } - }) -} - -// CSVProducer creates a new CSV producer. -// -// The producer takes input data then writes as CSV to an output writer (essentially as a pipe).
-// -// Supported input underlying types and interfaces, prioritized in this order: -// - *csv.Reader -// - CSVReader (reader options are ignored) -// - io.Reader -// - io.WriterTo -// - encoding.BinaryMarshaler -// - [][]string -// - []byte -// - string -// -// The producer prioritizes situations where buffering the input is not required. -func CSVProducer(opts ...CSVOpt) Producer { - o := csvOptsWithDefaults(opts) - - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("CSVProducer requires a writer") - } - if data == nil { - return errors.New("nil data for CSVProducer") - } - - csvWriter := csv.NewWriter(writer) - o.applyToWriter(csvWriter) - closer := defaultCloser - if o.closeStream { - if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { - closer = cl.Close - } - } - defer func() { - _ = closer() - }() - - if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { - defer rc.Close() - } - - switch origin := data.(type) { - case *csv.Reader: - csvReader := origin - o.applyToReader(csvReader) - - return pipeCSV(csvWriter, csvReader, o) - - case CSVReader: - csvReader := origin - // no reader options available - - return pipeCSV(csvWriter, csvReader, o) - - case io.Reader: - csvReader := csv.NewReader(origin) - o.applyToReader(csvReader) - - return pipeCSV(csvWriter, csvReader, o) - - case io.WriterTo: - // async piping of the writes performed by WriteTo - r, w := io.Pipe() - csvReader := csv.NewReader(r) - o.applyToReader(csvReader) - - pipe, _ := errgroup.WithContext(context.Background()) - pipe.Go(func() error { - _, err := origin.WriteTo(w) - _ = w.Close() - return err - }) - - pipe.Go(func() error { - defer func() { - _ = r.Close() - }() - - return pipeCSV(csvWriter, csvReader, o) - }) - - return pipe.Wait() - - case encoding.BinaryMarshaler: - buf, err := origin.MarshalBinary() - if err != nil { - return err - } - rdr := bytes.NewBuffer(buf) - csvReader := csv.NewReader(rdr) - - return bufferedCSV(csvWriter, csvReader, o) - - default: - // support [][]string, []byte, string (or pointers to those) - v := reflect.Indirect(reflect.ValueOf(data)) - t := v.Type() - - switch { - case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: - csvReader := &csvRecordsWriter{ - records: make([][]string, v.Len()), - } - reflect.Copy(reflect.ValueOf(csvReader.records), v) - - return pipeCSV(csvWriter, csvReader, o) - - case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: - buf := bytes.NewBuffer(v.Bytes()) - csvReader := csv.NewReader(buf) - o.applyToReader(csvReader) - - return bufferedCSV(csvWriter, csvReader, o) - - case t.Kind() == reflect.String: - buf := bytes.NewBufferString(v.String()) - csvReader := csv.NewReader(buf) - o.applyToReader(csvReader) - - return bufferedCSV(csvWriter, csvReader, o) - - default: - return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s", - data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface", - ) - } - } - }) -} - -// pipeCSV copies CSV records from a CSV reader to a CSV writer -func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error { - for ; opts.skippedLines > 0; opts.skippedLines-- { - _, err := csvReader.Read() - if err != nil { - if errors.Is(err, io.EOF) { - return nil - } - - return err - } - } - - for { - record, err := csvReader.Read() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - - return err - } - - if err := csvWriter.Write(record); 
err != nil { - return err - } - } - - csvWriter.Flush() - - return csvWriter.Error() -} - -// bufferedCSV copies CSV records from a CSV reader to a CSV writer, -// by first reading all records then writing them at once. -func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error { - for ; opts.skippedLines > 0; opts.skippedLines-- { - _, err := csvReader.Read() - if err != nil { - if errors.Is(err, io.EOF) { - return nil - } - - return err - } - } - - records, err := csvReader.ReadAll() - if err != nil { - return err - } - - return csvWriter.WriteAll(records) -} diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go deleted file mode 100644 index c16464c5..00000000 --- a/vendor/github.com/go-openapi/runtime/csv_options.go +++ /dev/null @@ -1,121 +0,0 @@ -package runtime - -import ( - "encoding/csv" - "io" -) - -// A CSVOpt alters the behavior of the CSV consumer or producer. -type CSVOpt func(*csvOpts) - -type csvOpts struct { - csvReader csv.Reader - csvWriter csv.Writer - skippedLines int - closeStream bool -} - -// WithCSVReaderOpts specifies the options to csv.Reader -// when reading CSV. -func WithCSVReaderOpts(reader csv.Reader) CSVOpt { - return func(o *csvOpts) { - o.csvReader = reader - } -} - -// WithCSVWriterOpts specifies the options to csv.Writer -// when writing CSV. -func WithCSVWriterOpts(writer csv.Writer) CSVOpt { - return func(o *csvOpts) { - o.csvWriter = writer - } -} - -// WithCSVSkipLines will skip header lines. -func WithCSVSkipLines(skipped int) CSVOpt { - return func(o *csvOpts) { - o.skippedLines = skipped - } -} - -// WithCSVClosesStream directs the consumer or producer to close the underlying stream when done. -func WithCSVClosesStream() CSVOpt { - return func(o *csvOpts) { - o.closeStream = true - } -} - -func (o csvOpts) applyToReader(in *csv.Reader) { - if o.csvReader.Comma != 0 { - in.Comma = o.csvReader.Comma - } - if o.csvReader.Comment != 0 { - in.Comment = o.csvReader.Comment - } - if o.csvReader.FieldsPerRecord != 0 { - in.FieldsPerRecord = o.csvReader.FieldsPerRecord - } - - in.LazyQuotes = o.csvReader.LazyQuotes - in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace - in.ReuseRecord = o.csvReader.ReuseRecord -} - -func (o csvOpts) applyToWriter(in *csv.Writer) { - if o.csvWriter.Comma != 0 { - in.Comma = o.csvWriter.Comma - } - in.UseCRLF = o.csvWriter.UseCRLF -} - -func csvOptsWithDefaults(opts []CSVOpt) csvOpts { - var o csvOpts - for _, apply := range opts { - apply(&o) - } - - return o -} - -type CSVWriter interface { - Write([]string) error - Flush() - Error() error -} - -type CSVReader interface { - Read() ([]string, error) -} - -var ( - _ CSVWriter = &csvRecordsWriter{} - _ CSVReader = &csvRecordsWriter{} -) - -// csvRecordsWriter is an internal container to move CSV records back and forth -type csvRecordsWriter struct { - i int - records [][]string -} - -func (w *csvRecordsWriter) Write(record []string) error { - w.records = append(w.records, record) - - return nil -} - -func (w *csvRecordsWriter) Read() ([]string, error) { - if w.i >= len(w.records) { - return nil, io.EOF - } - defer func() { - w.i++ - }() - - return w.records[w.i], nil -} - -func (w *csvRecordsWriter) Flush() {} - -func (w *csvRecordsWriter) Error() error { - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go deleted file mode 100644 index 0d390cfd..00000000 --- a/vendor/github.com/go-openapi/runtime/discard.go +++ /dev/null @@ -1,9 +0,0 @@ -package runtime - -import "io" - -// DiscardConsumer does absolutely
nothing, it's a black hole. -var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) - -// DiscardProducer does absolutely nothing, it's a black hole. -var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go deleted file mode 100644 index 397d8a45..00000000 --- a/vendor/github.com/go-openapi/runtime/file.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "github.com/go-openapi/swag" - -type File = swag.File diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go deleted file mode 100644 index 4d111db4..00000000 --- a/vendor/github.com/go-openapi/runtime/headers.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "mime" - "net/http" - - "github.com/go-openapi/errors" -) - -// ContentType parses a content type header -func ContentType(headers http.Header) (string, string, error) { - ct := headers.Get(HeaderContentType) - orig := ct - if ct == "" { - ct = DefaultMime - } - if ct == "" { - return "", "", nil - } - - mt, opts, err := mime.ParseMediaType(ct) - if err != nil { - return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) - } - - if cs, ok := opts[charsetKey]; ok { - return mt, cs, nil - } - - return mt, "", nil -} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go deleted file mode 100644 index e3341286..00000000 --- a/vendor/github.com/go-openapi/runtime/interfaces.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
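The deleted ContentType helper splits a Content-Type header into its media type and charset parameter. A small sketch, under the same assumption that the upstream module remains available:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime"
)

func main() {
	h := http.Header{}
	h.Set(runtime.HeaderContentType, "application/json; charset=utf-8")

	// ContentType returns the media type and the charset option, if any.
	mediaType, charset, err := runtime.ContentType(h)
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType, charset) // application/json utf-8
}
```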
- -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/go-openapi/strfmt" -) - -// OperationHandlerFunc an adapter for a function to the OperationHandler interface -type OperationHandlerFunc func(interface{}) (interface{}, error) - -// Handle implements the operation handler interface -func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { - return s(data) -} - -// OperationHandler a handler for a swagger operation -type OperationHandler interface { - Handle(interface{}) (interface{}, error) -} - -// ConsumerFunc represents a function that can be used as a consumer -type ConsumerFunc func(io.Reader, interface{}) error - -// Consume consumes the reader into the data parameter -func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { - return fn(reader, data) -} - -// Consumer implementations know how to bind the values on the provided interface to -// data provided by the request body -type Consumer interface { - // Consume performs the binding of request values - Consume(io.Reader, interface{}) error -} - -// ProducerFunc represents a function that can be used as a producer -type ProducerFunc func(io.Writer, interface{}) error - -// Produce produces the response for the provided data -func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { - return f(writer, data) -} - -// Producer implementations know how to turn the provided interface into a valid -// HTTP response -type Producer interface { - // Produce writes to the http response - Produce(io.Writer, interface{}) error -} - -// AuthenticatorFunc turns a function into an authenticator -type AuthenticatorFunc func(interface{}) (bool, interface{}, error) - -// Authenticate authenticates the request with the provided data -func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { - return f(params) -} - -// Authenticator represents an authentication strategy -// implementations of Authenticator know how to authenticate the -// request data and translate that into a valid principal object or an error -type Authenticator interface { - Authenticate(interface{}) (bool, interface{}, error) -} - -// AuthorizerFunc turns a function into an authorizer -type AuthorizerFunc func(*http.Request, interface{}) error - -// Authorize authorizes the processing of the request for the principal -func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { - return f(r, principal) -} - -// Authorizer represents an authorization strategy -// implementations of Authorizer know how to authorize the principal object -// using the request data and returns error if unauthorized -type Authorizer interface { - Authorize(*http.Request, interface{}) error -} - -// Validatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. -// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the validations obtained from the spec -type Validatable interface { - Validate(strfmt.Registry) error -} - -// ContextValidatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. 
-// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the context validations obtained from the spec -type ContextValidatable interface { - ContextValidate(context.Context, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go deleted file mode 100644 index 5a690559..00000000 --- a/vendor/github.com/go-openapi/runtime/json.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONConsumer creates a new JSON consumer -func JSONConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := json.NewDecoder(reader) - dec.UseNumber() // preserve number formats - return dec.Decode(data) - }) -} - -// JSONProducer creates a new JSON producer -func JSONProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := json.NewEncoder(writer) - enc.SetEscapeHTML(false) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go deleted file mode 100644 index 9e3e1ecb..00000000 --- a/vendor/github.com/go-openapi/runtime/request.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
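JSONConsumer and JSONProducer, deleted above, are the Consumer/Producer pair for application/json; note UseNumber() on the decoder and SetEscapeHTML(false) on the encoder. A round-trip sketch (same module assumption):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	// Produce: encode a value as JSON without HTML escaping.
	var buf bytes.Buffer
	if err := runtime.JSONProducer().Produce(&buf, map[string]int{"answer": 42}); err != nil {
		panic(err)
	}

	// Consume: decode it back; numbers arrive as json.Number thanks to UseNumber.
	var out map[string]interface{}
	if err := runtime.JSONConsumer().Consume(&buf, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%v (%T)\n", out["answer"], out["answer"]) // 42 (json.Number)
}
```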
- -package runtime - -import ( - "bufio" - "context" - "errors" - "io" - "net/http" - "strings" - - "github.com/go-openapi/swag" -) - -// CanHaveBody returns true if this method can have a body -func CanHaveBody(method string) bool { - mn := strings.ToUpper(method) - return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" -} - -// IsSafe returns true if this is a request with a safe method -func IsSafe(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn == "GET" || mn == "HEAD" -} - -// AllowsBody returns true if the request allows for a body -func AllowsBody(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn != "HEAD" -} - -// HasBody returns true if this request has a body -func HasBody(r *http.Request) bool { - // happy case: we have a content length set - if r.ContentLength > 0 { - return true - } - - if r.Header.Get("content-length") != "" { - // in this case, no Transfer-Encoding should be present - // we have a header set but it was explicitly set to 0, so we assume no body - return false - } - - rdr := newPeekingReader(r.Body) - r.Body = rdr - return rdr.HasContent() -} - -func newPeekingReader(r io.ReadCloser) *peekingReader { - if r == nil { - return nil - } - return &peekingReader{ - underlying: bufio.NewReader(r), - orig: r, - } -} - -type peekingReader struct { - underlying interface { - Buffered() int - Peek(int) ([]byte, error) - Read([]byte) (int, error) - } - orig io.ReadCloser -} - -func (p *peekingReader) HasContent() bool { - if p == nil { - return false - } - if p.underlying.Buffered() > 0 { - return true - } - b, err := p.underlying.Peek(1) - if err != nil { - return false - } - return len(b) > 0 -} - -func (p *peekingReader) Read(d []byte) (int, error) { - if p == nil { - return 0, io.EOF - } - if p.underlying == nil { - return 0, io.ErrUnexpectedEOF - } - return p.underlying.Read(d) -} - -func (p *peekingReader) Close() error { - if p.underlying == nil { - return errors.New("reader already closed") - } - p.underlying = nil - if p.orig != nil { - return p.orig.Close() - } - return nil -} - -// JSONRequest creates a new http request with json headers set. -// -// It uses context.Background.
-func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) - if err != nil { - return nil, err - } - req.Header.Add(HeaderContentType, JSONMime) - req.Header.Add(HeaderAccept, JSONMime) - return req, nil -} - -// Gettable for things with a method GetOK(string) (data []string, hasKey bool, hasValue bool) -type Gettable interface { - GetOK(string) ([]string, bool, bool) -} - -// ReadSingleValue reads a single value from the source (the last one, when several are present) -func ReadSingleValue(values Gettable, name string) string { - vv, _, hv := values.GetOK(name) - if hv { - return vv[len(vv)-1] - } - return "" -} - -// ReadCollectionValue reads a collection value from a string data source -func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { - v := ReadSingleValue(values, name) - return swag.SplitByFormat(v, collectionFormat) -} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go deleted file mode 100644 index 3b011a0b..00000000 --- a/vendor/github.com/go-openapi/runtime/statuses.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
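ReadSingleValue and ReadCollectionValue work against any Gettable; the runtime.Values map removed later in this patch is the canonical implementation. A sketch with hypothetical "name" and "tags" parameters, the latter in csv collection format:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	vals := runtime.Values{
		"name": {"first", "last"},
		"tags": {"red,green,blue"},
	}

	// ReadSingleValue returns the last value recorded for a key.
	fmt.Println(runtime.ReadSingleValue(vals, "name")) // last

	// ReadCollectionValue splits a raw value by a collection format ("csv" here).
	fmt.Println(runtime.ReadCollectionValue(vals, "tags", "csv")) // [red green blue]
}
```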
- -package runtime - -// Statuses lists the most common HTTP status codes to default message -// taken from https://httpstatuses.com/ -var Statuses = map[int]string{ - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 103: "Checkpoint", - 122: "URI too long", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Request Processed", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request-URI Too Long", - 415: "Unsupported Media Type", - 416: "Request Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 444: "No Response", - 449: "Retry With", - 450: "Blocked by Windows Parental Controls", - 451: "Wrong Exchange Server", - 499: "Client Closed Request", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 509: "Bandwidth Limit Exceeded", - 510: "Not Extended", - 511: "Network Authentication Required", - 598: "Network read timeout error", - 599: "Network connect timeout error", -} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go deleted file mode 100644 index f33320b7..00000000 --- a/vendor/github.com/go-openapi/runtime/text.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -// TextConsumer creates a new text consumer -func TextConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("TextConsumer requires a reader") // early exit - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - // If the buffer is empty, no need to unmarshal it, which causes a panic. 
- if len(b) == 0 { - return nil - } - - if tu, ok := data.(encoding.TextUnmarshaler); ok { - err := tu.UnmarshalText(b) - if err != nil { - return fmt.Errorf("text consumer: %v", err) - } - - return nil - } - - t := reflect.TypeOf(data) - if data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t.Elem().Kind() == reflect.String { - v.SetString(string(b)) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", - data, data, "can be resolved by supporting TextUnmarshaler interface") - }) -} - -// TextProducer creates a new text producer -func TextProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("TextProducer requires a writer") // early exit - } - - if data == nil { - return errors.New("no data given to produce text from") - } - - if tm, ok := data.(encoding.TextMarshaler); ok { - txt, err := tm.MarshalText() - if err != nil { - return fmt.Errorf("text producer: %v", err) - } - _, err = writer.Write(txt) - return err - } - - if str, ok := data.(error); ok { - _, err := writer.Write([]byte(str.Error())) - return err - } - - if str, ok := data.(fmt.Stringer); ok { - _, err := writer.Write([]byte(str.String())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - if v.Kind() != reflect.String { - return fmt.Errorf("%T is not a supported type by the TextProducer", data) - } - - _, err := writer.Write([]byte(v.String())) - return err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go deleted file mode 100644 index 11f5732a..00000000 --- a/vendor/github.com/go-openapi/runtime/values.go +++ /dev/null @@ -1,19 +0,0 @@ -package runtime - -// Values typically represent parameters on a http request. -type Values map[string][]string - -// GetOK returns the values collection for the given key. -// When the key is present in the map it will return true for hasKey. -// When the value is not empty it will return true for hasValue. -func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { - value, hasKey = v[key] - if !hasKey { - return - } - if len(value) == 0 { - return - } - hasValue = true - return -} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go deleted file mode 100644 index 821c7393..00000000 --- a/vendor/github.com/go-openapi/runtime/xml.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
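TextConsumer and TextProducer handle text/plain payloads, preferring encoding.TextUnmarshaler/TextMarshaler and falling back to reflection for plain strings. A round-trip sketch (same module assumption):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	// Produce plain text from a string value.
	var buf bytes.Buffer
	if err := runtime.TextProducer().Produce(&buf, "hello"); err != nil {
		panic(err)
	}

	// Consume it back into a *string destination.
	var out string
	if err := runtime.TextConsumer().Consume(&buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // hello
}
```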
- -package runtime - -import ( - "encoding/xml" - "io" -) - -// XMLConsumer creates a new XML consumer -func XMLConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := xml.NewDecoder(reader) - return dec.Decode(data) - }) -} - -// XMLProducer creates a new XML producer -func XMLProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := xml.NewEncoder(writer) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/spec/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore deleted file mode 100644 index f47cb204..00000000 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml deleted file mode 100644 index 22f8d21c..00000000 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ /dev/null @@ -1,61 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md deleted file mode 100644 index 7fd2810c..00000000 --- a/vendor/github.com/go-openapi/spec/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) - -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) - -The object model for OpenAPI specification documents. - -### FAQ - -* What does this do? - -> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model -> 2. It knows how to resolve $ref pointers and expand them to make a single root document - -* How does it play with the rest of the go-openapi packages? - -> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) -> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations -> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it -> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents - -* Does this library support OpenAPI 3? - -> No. -> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). -> There is no plan to make it evolve toward supporting OpenAPI 3.x. -> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. -> -> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 - -* Does the unmarshaling support YAML? - -> Not directly. The exposed types know only how to unmarshal from JSON. -> -> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by -> github.com/go-openapi/loads -> -> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec -> -> See also https://github.com/go-openapi/spec/issues/164 - -* How can I validate a spec? - -> Validation is provided by [the validate package](http://github.com/go-openapi/validate) - -* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? - -> We found jsonschema compatibility more important, since `id` in jsonschema influences -> how `$ref` is resolved. -> This `id` does not conflict with any property named `id`.
-> -> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go deleted file mode 100644 index 122993b4..00000000 --- a/vendor/github.com/go-openapi/spec/cache.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "sync" -) - -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.RWMutex - store map[string]interface{} -} - -func (s *simpleCache) ShallowClone() ResolutionCache { - store := make(map[string]interface{}, len(s.store)) - s.lock.RLock() - for k, v := range s.store { - store[k] = v - } - s.lock.RUnlock() - - return &simpleCache{ - store: store, - } -} - -// Get retrieves a cached URI -func (s *simpleCache) Get(uri string) (interface{}, bool) { - s.lock.RLock() - v, ok := s.store[uri] - - s.lock.RUnlock() - return v, ok -} - -// Set caches a URI -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - -var ( - // resCache is a package level cache for $ref resolution and expansion. - // It is initialized lazily by methods that have the need for it: no - // memory is allocated unless some expander methods are called. - // - // It is initialized with JSON schema and swagger schema, - // which do not mutate during normal operations. - // - // All subsequent utilizations of this cache are produced from a shallow - // clone of this initial version. - resCache *simpleCache - onceCache sync.Once - - _ ResolutionCache = &simpleCache{} -) - -// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. -func initResolutionCache() { - resCache = defaultResolutionCache() -} - -func defaultResolutionCache() *simpleCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} - -func cacheOrDefault(cache ResolutionCache) ResolutionCache { - onceCache.Do(initResolutionCache) - - if cache != nil { - return cache - } - - // get a shallow clone of the base cache with swagger and json schema - return resCache.ShallowClone() -} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go deleted file mode 100644 index 2f7bb219..00000000 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// ContactInfo contact information for the exposed API. -// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - ContactInfoProps - VendorExtensible -} - -// ContactInfoProps hold the properties of a ContactInfo object -type ContactInfoProps struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} - -// UnmarshalJSON hydrates ContactInfo from json -func (c *ContactInfo) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { - return err - } - return json.Unmarshal(data, &c.VendorExtensible) -} - -// MarshalJSON produces ContactInfo as json -func (c ContactInfo) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(c.ContactInfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(c.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go deleted file mode 100644 index fc889f6d..00000000 --- a/vendor/github.com/go-openapi/spec/debug.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" - "log" - "os" - "path" - "runtime" -) - -// Debug is true when the SWAGGER_DEBUG env var is not empty. -// -// It enables a more verbose logging of this package. 
-var Debug = os.Getenv("SWAGGER_DEBUG") != "" - -var ( - // specLogger is a debug logger for this package - specLogger *log.Logger -) - -func init() { - debugOptions() -} - -func debugOptions() { - specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) -} - -func debugLog(msg string, args ...interface{}) { - // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() - if Debug { - _, file1, pos1, _ := runtime.Caller(1) - specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go deleted file mode 100644 index 1f428475..00000000 --- a/vendor/github.com/go-openapi/spec/embed.go +++ /dev/null @@ -1,17 +0,0 @@ -package spec - -import ( - "embed" - "path" -) - -//go:embed schemas/*.json schemas/*/*.json -var assets embed.FS - -func jsonschemaDraft04JSONBytes() ([]byte, error) { - return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) -} - -func v2SchemaJSONBytes() ([]byte, error) { - return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) -} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go deleted file mode 100644 index 6992c7ba..00000000 --- a/vendor/github.com/go-openapi/spec/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package spec - -import "errors" - -// Error codes -var ( - // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type - ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") - - // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer - ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") - - // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. - // At the moment, $ref are supported only inside: schemas, parameters, responses, path items - ErrDerefUnsupportedType = errors.New("deref: unsupported type") - - // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type - ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") -) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go deleted file mode 100644 index b81a5699..00000000 --- a/vendor/github.com/go-openapi/spec/expander.go +++ /dev/null @@ -1,607 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" -) - -// ExpandOptions provides options for the spec expander. -// -// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. -// -// If left empty, the root document is assumed to be located in the current working directory: -// all relative $ref's will be resolved from there. 
-// -// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. -type ExpandOptions struct { - RelativeBase string // the path to the root document to expand. This is a file, not a directory - SkipSchemas bool // do not expand schemas, just paths, parameters and responses - ContinueOnError bool // continue expanding even after an error is found - PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document - AbsoluteCircularRef bool // circular $refs remaining after expansion remain absolute URLs -} - -func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { - if opts != nil { - clone := *opts // shallow clone to avoid propagating internal changes to the caller - if clone.RelativeBase != "" { - clone.RelativeBase = normalizeBase(clone.RelativeBase) - } - // if the relative base is empty, let the schema loader choose a pseudo root document - return &clone - } - return &ExpandOptions{} -} - -// ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger, options *ExpandOptions) error { - options = optionsOrDefault(options) - resolver := defaultSchemaLoader(spec, options, nil, nil) - - specBasePath := options.RelativeBase - - if !options.SkipSchemas { - for key, definition := range spec.Definitions { - parentRefs := make([]string, 0, 10) - parentRefs = append(parentRefs, "#/definitions/"+key) - - def, err := expandSchema(definition, parentRefs, resolver, specBasePath) - if resolver.shouldStopOnError(err) { - return err - } - if def != nil { - spec.Definitions[key] = *def - } - } - } - - for key := range spec.Parameters { - parameter := spec.Parameters[key] - if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Parameters[key] = parameter - } - - for key := range spec.Responses { - response := spec.Responses[key] - if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Responses[key] = response - } - - if spec.Paths != nil { - for key := range spec.Paths.Paths { - pth := spec.Paths.Paths[key] - if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Paths.Paths[key] = pth - } - } - - return nil -} - -const rootBase = ".root" - -// baseForRoot loads the root document into the cache and produces a fake ".root" base path entry -// for further $ref resolution -func baseForRoot(root interface{}, cache ResolutionCache) string { - // cache the root document to resolve $ref's - normalizedBase := normalizeBase(rootBase) - - if root == nil { - // ensure that we never leave a nil root: always cache the root base pseudo-document - cachedRoot, found := cache.Get(normalizedBase) - if found && cachedRoot != nil { - // the cache is already preloaded with a root - return normalizedBase - } - - root = map[string]interface{}{} - } - - cache.Set(normalizedBase, root) - - return normalizedBase -} - -// ExpandSchema expands the refs in the schema object with reference to the root object. -// -// go-openapi/validate uses this function. -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandSchemaWithBasePath to resolve external references). -// -// Setting the cache is optional and this parameter may safely be left to nil.
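As the doc comment above notes, ExpandSchema treats the schema itself as the root document when root is nil, and a nil cache falls back to the package-level default. A minimal usage sketch under those assumptions, using the signature shown in the hunk that follows (the schema literal is illustrative):

package main

import (
	"encoding/json"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{
		"definitions": {"name": {"type": "string"}},
		"properties":  {"name": {"$ref": "#/definitions/name"}}
	}`)

	var sch spec.Schema
	if err := json.Unmarshal(raw, &sch); err != nil {
		log.Fatal(err)
	}

	// nil root: the schema becomes its own root; nil cache: use the package default.
	if err := spec.ExpandSchema(&sch, nil, nil); err != nil {
		log.Fatal(err)
	}

	out, _ := json.Marshal(sch)
	log.Printf("expanded: %s", out)
}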
-func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - if root == nil { - root = schema - } - - opts := &ExpandOptions{ - // when a root is specified, cache the root as an in-memory document for $ref retrieval - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - } - - return ExpandSchemaWithBasePath(schema, cache, opts) -} - -// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. -// -// Setting the cache is optional and this parameter may safely be left to nil. -func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { - if schema == nil { - return nil - } - - cache = cacheOrDefault(cache) - - opts = optionsOrDefault(opts) - - resolver := defaultSchemaLoader(nil, opts, cache, nil) - - parentRefs := make([]string, 0, 10) - s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) - if err != nil { - return err - } - if s != nil { - // guard for when continuing on error - *schema = *s - } - - return nil -} - -func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Items == nil { - return &target, nil - } - - // array - if target.Items.Schema != nil { - t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - *target.Items.Schema = *t - } - - // tuple - for i := range target.Items.Schemas { - t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - target.Items.Schemas[i] = *t - } - - return &target, nil -} - -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Ref.String() == "" && target.Ref.IsRoot() { - newRef := normalizeRef(&target.Ref, basePath) - target.Ref = *newRef - return &target, nil - } - - // change the base path of resolution when an ID is encountered - // otherwise the basePath should inherit the parent's - if target.ID != "" { - basePath, _ = resolver.setSchemaID(target, target.ID, basePath) - } - - if target.Ref.String() != "" { - if !resolver.options.SkipSchemas { - return expandSchemaRef(target, parentRefs, resolver, basePath) - } - - // when "expand" with SkipSchema, we just rebase the existing $ref without replacing - // the full schema. 
- rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) - if err != nil { - return nil, err - } - target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) - - return &target, nil - } - - for k := range target.Definitions { - tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if tt != nil { - target.Definitions[k] = *tt - } - } - - t, err := expandItems(target, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target = *t - } - - for i := range target.AllOf { - t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.AllOf[i] = *t - } - } - - for i := range target.AnyOf { - t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.AnyOf[i] = *t - } - } - - for i := range target.OneOf { - t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.OneOf[i] = *t - } - } - - if target.Not != nil { - t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Not = *t - } - } - - for k := range target.Properties { - t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.Properties[k] = *t - } - } - - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalProperties.Schema = *t - } - } - - for k := range target.PatternProperties { - t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.PatternProperties[k] = *t - } - } - - for k := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Dependencies[k].Schema = *t - } - } - } - - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalItems.Schema = *t - } - } - return &target, nil -} - -func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - // if a Ref is found, all sibling fields are skipped - // Ref also changes the resolution scope of children expandSchema - - // here the resolution scope is changed because a $ref was encountered - normalizedRef := normalizeRef(&target.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) { - // this means there is a cycle in the recursion tree: return the Ref - // - circular refs cannot be expanded. 
We leave them as ref. - // - denormalization means that a new local file ref is set relative to the original basePath - debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", - basePath, normalizedBasePath, normalizedRef.String()) - if !resolver.options.AbsoluteCircularRef { - target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) - } else { - target.Ref = *normalizedRef - } - return &target, nil - } - - var t *Schema - err := resolver.Resolve(&target.Ref, &t, basePath) - if resolver.shouldStopOnError(err) { - return nil, err - } - - if t == nil { - // guard for when continuing on error - return &target, nil - } - - parentRefs = append(parentRefs, normalizedRef.String()) - transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) - - basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) - - return expandSchema(*t, parentRefs, transitiveResolver, basePath) -} - -func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { - if pathItem == nil { - return nil - } - - parentRefs := make([]string, 0, 10) - if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - - if pathItem.Ref.String() != "" { - transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) - basePath = transitiveResolver.updateBasePath(resolver, basePath) - resolver = transitiveResolver - } - - pathItem.Ref = Ref{} - for i := range pathItem.Parameters { - if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - - ops := []*Operation{ - pathItem.Get, - pathItem.Head, - pathItem.Options, - pathItem.Put, - pathItem.Post, - pathItem.Patch, - pathItem.Delete, - } - for _, op := range ops { - if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - - return nil -} - -func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { - if op == nil { - return nil - } - - for i := range op.Parameters { - param := op.Parameters[i] - if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - op.Parameters[i] = param - } - - if op.Responses == nil { - return nil - } - - responses := op.Responses - if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - - for code := range responses.StatusCodeResponses { - response := responses.StatusCodeResponses[code] - if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - responses.StatusCodeResponses[code] = response - } - - return nil -} - -// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandResponse to resolve external references). -// -// Setting the cache is optional and this parameter may safely be left to nil.
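A caller-side sketch of the root-document variant described above. The builder helpers used here (NewResponse, RefSchema, StringProperty, and the Definitions map on SwaggerProps) are assumed to come from this same package; they do not appear in this hunk:

package main

import (
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// A tiny in-memory root document with one reusable definition (illustrative).
	root := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Definitions: spec.Definitions{"Error": *spec.StringProperty()},
	}}

	// A response whose schema is a $ref into that root.
	resp := spec.NewResponse().
		WithDescription("an error").
		WithSchema(spec.RefSchema("#/definitions/Error"))

	// Resolve against the root document; the nil cache falls back to the default.
	if err := spec.ExpandResponseWithRoot(resp, root, nil); err != nil {
		log.Fatal(err)
	}
	log.Printf("expanded schema type: %v", resp.Schema.Type)
}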
-func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - } - resolver := defaultSchemaLoader(root, opts, cache, nil) - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandResponse expands a response based on a basepath -// -// All refs inside response will be resolved relative to basePath -func ExpandResponse(response *Response, basePath string) error { - opts := optionsOrDefault(&ExpandOptions{ - RelativeBase: basePath, - }) - resolver := defaultSchemaLoader(nil, opts, nil, nil) - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandParameter to resolve external references). -func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - } - resolver := defaultSchemaLoader(root, opts, cache, nil) - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -// ExpandParameter expands a parameter based on a basepath. -// This is the exported version of expandParameter -// all refs inside parameter will be resolved relative to basePath -func ExpandParameter(parameter *Parameter, basePath string) error { - opts := optionsOrDefault(&ExpandOptions{ - RelativeBase: basePath, - }) - resolver := defaultSchemaLoader(nil, opts, nil, nil) - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { - var ( - ref *Ref - sch *Schema - ) - - switch refable := input.(type) { - case *Parameter: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - case *Response: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - default: - return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) - } - - return ref, sch, nil -} - -func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, sch, err := getRefAndSchema(input) - if err != nil { - return err - } - - if ref == nil && sch == nil { // nothing to do - return nil - } - - parentRefs := make([]string, 0, 10) - if ref != nil { - // dereference this $ref - if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - - ref, sch, _ = getRefAndSchema(input) - } - - if ref.String() != "" { - transitiveResolver := resolver.transitiveResolver(basePath, *ref) - basePath = resolver.updateBasePath(transitiveResolver, basePath) - resolver = transitiveResolver - } - - if sch == nil { - // nothing to be expanded - if ref != nil { - *ref = Ref{} - } - - return nil - } - - if sch.Ref.String() != "" { - rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) - if ern != nil { - return ern - } - - if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ - // this is a circular $ref: stop expansion - if !resolver.options.AbsoluteCircularRef { - sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) - } else { - sch.Ref = rebasedRef - } - } - } - - // $ref expansion or rebasing is performed by expandSchema below - if ref != nil { - *ref = Ref{} - } - - // expand schema - // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - - if s != nil { // guard for when continuing on error - *sch = *s - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go deleted file mode 100644 index 88add91b..00000000 --- a/vendor/github.com/go-openapi/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. -// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go deleted file mode 100644 index 9dfd17b1..00000000 --- a/vendor/github.com/go-openapi/spec/header.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonArray = "array" -) - -// HeaderProps describes a response header -type HeaderProps struct { - Description string `json:"description,omitempty"` -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - CommonValidations - SimpleSchema - VendorExtensible - HeaderProps -} - -// ResponseHeader creates a new header instance for use in a response -func ResponseHeader() *Header { - return new(Header) -} - -// WithDescription sets the description on this response, allows for chaining -func (h *Header) WithDescription(description string) *Header { - h.Description = description - return h -} - -// Typed a fluent builder method for the type of parameter -func (h *Header) Typed(tpe, format string) *Header { - h.Type = tpe - h.Format = format - return h -} - -// CollectionOf a fluent builder method for an array item -func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = jsonArray - h.Items = items - h.CollectionFormat = format - return h -} - -// WithDefault sets the default value on this item -func (h *Header) WithDefault(defaultValue interface{}) *Header { - h.Default = defaultValue - return h -} - -// WithMaxLength sets a max length value -func (h *Header) WithMaxLength(max int64) *Header { - h.MaxLength = &max - return h -} - -// WithMinLength sets a min length value -func (h *Header) WithMinLength(min int64) *Header { - h.MinLength = &min - return h -} - -// WithPattern sets a pattern value -func (h *Header) WithPattern(pattern string) *Header { - h.Pattern = pattern - return h -} - -// WithMultipleOf sets a multiple of value -func (h *Header) WithMultipleOf(number float64) *Header { - h.MultipleOf = &number - return h -} - -// WithMaximum sets a maximum number value -func (h *Header) WithMaximum(max float64, exclusive bool) *Header { - h.Maximum = &max - h.ExclusiveMaximum = exclusive - return h -} - -// WithMinimum sets a minimum number value -func (h *Header) WithMinimum(min float64, exclusive bool) *Header { - h.Minimum = &min - h.ExclusiveMinimum = exclusive - return h -} - -// WithEnum sets a the enum values (replace) -func (h *Header) WithEnum(values ...interface{}) *Header { - h.Enum = append([]interface{}{}, values...) 
- return h -} - -// WithMaxItems sets the max items -func (h *Header) WithMaxItems(size int64) *Header { - h.MaxItems = &size - return h -} - -// WithMinItems sets the min items -func (h *Header) WithMinItems(size int64) *Header { - h.MinItems = &size - return h -} - -// UniqueValues dictates that this array can only have unique items -func (h *Header) UniqueValues() *Header { - h.UniqueItems = true - return h -} - -// AllowDuplicates this array can have duplicates -func (h *Header) AllowDuplicates() *Header { - h.UniqueItems = false - return h -} - -// WithValidations is a fluent method to set header validations -func (h *Header) WithValidations(val CommonValidations) *Header { - h.SetValidations(SchemaValidations{CommonValidations: val}) - return h -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON unmarshals this header from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &h.HeaderProps) -} - -// JSONLookup look up a value by the json property name -func (h Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := h.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go deleted file mode 100644 index 582f0fd4..00000000 --- a/vendor/github.com/go-openapi/spec/info.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Extensions vendor specific extensions -type Extensions map[string]interface{} - -// Add adds a value to these extensions -func (e Extensions) Add(key string, value interface{}) { - realKey := strings.ToLower(key) - e[realKey] = value -} - -// GetString gets a string value from the extensions -func (e Extensions) GetString(key string) (string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(string) - return str, ok - } - return "", false -} - -// GetInt gets an int value from the extensions -func (e Extensions) GetInt(key string) (int, bool) { - realKey := strings.ToLower(key) - - if v, ok := e.GetString(realKey); ok { - if r, err := strconv.Atoi(v); err == nil { - return r, true - } - } - - if v, ok := e[realKey]; ok { - if r, rOk := v.(float64); rOk { - return int(r), true - } - } - return -1, false -} - -// GetBool gets a bool value from the extensions -func (e Extensions) GetBool(key string) (bool, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(bool) - return str, ok - } - return false, false -} - -// GetStringSlice gets a string slice from the extensions -func (e Extensions) GetStringSlice(key string) ([]string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - arr, isSlice := v.([]interface{}) - if !isSlice { - return nil, false - } - var strs []string - for _, iface := range arr { - str, isString := iface.(string) - if !isString { - return nil, false - } - strs = append(strs, str) - } - return strs, ok - } - return nil, false -} - -// VendorExtensible composition block. -type VendorExtensible struct { - Extensions Extensions -} - -// AddExtension adds an extension to this extensible object -func (v *VendorExtensible) AddExtension(key string, value interface{}) { - if value == nil { - return - } - if v.Extensions == nil { - v.Extensions = make(map[string]interface{}) - } - v.Extensions.Add(key, value) -} - -// MarshalJSON marshals the extensions to json -func (v VendorExtensible) MarshalJSON() ([]byte, error) { - toser := make(map[string]interface{}) - for k, v := range v.Extensions { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - toser[k] = v - } - } - return json.Marshal(toser) -} - -// UnmarshalJSON for this extensible object -func (v *VendorExtensible) UnmarshalJSON(data []byte) error { - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if v.Extensions == nil { - v.Extensions = map[string]interface{}{} - } - v.Extensions[k] = vv - } - } - return nil -} - -// InfoProps the properties for an info definition -type InfoProps struct { - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *ContactInfo `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -// Info object provides metadata about the API. -// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
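A quick sketch of the extension behavior defined above: keys are lower-cased on insert, GetInt falls back from numeric strings to float64 values, and MarshalJSON only round-trips keys with the "x-" prefix (the keys and values here are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	var v spec.VendorExtensible
	v.AddExtension("X-Rate-Limit", "100") // stored under "x-rate-limit"
	v.AddExtension("internal-note", "hi") // stored, but dropped on marshal

	if n, ok := v.Extensions.GetInt("x-rate-limit"); ok {
		fmt.Println(n) // 100: GetInt parses numeric strings
	}

	b, _ := json.Marshal(v)
	fmt.Println(string(b)) // {"x-rate-limit":"100"}: only "x-" keys survive
}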
-// -// For more information: http://goo.gl/8us55a#infoObject -type Info struct { - VendorExtensible - InfoProps -} - -// JSONLookup look up a value by the json property name -func (i Info) JSONLookup(token string) (interface{}, error) { - if ex, ok := i.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(i.InfoProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (i Info) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.InfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (i *Info) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &i.InfoProps); err != nil { - return err - } - return json.Unmarshal(data, &i.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go deleted file mode 100644 index e2afb213..00000000 --- a/vendor/github.com/go-openapi/spec/items.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonRef = "$ref" -) - -// SimpleSchema describe swagger simple schemas for parameters and headers -type SimpleSchema struct { - Type string `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Items *Items `json:"items,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default interface{} `json:"default,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// TypeName return the type (or format) of a simple schema -func (s *SimpleSchema) TypeName() string { - if s.Format != "" { - return s.Format - } - return s.Type -} - -// ItemsTypeName yields the type of items in a simple schema array -func (s *SimpleSchema) ItemsTypeName() string { - if s.Items == nil { - return "" - } - return s.Items.TypeName() -} - -// Items a limited subset of JSON-Schema's items object. -// It is used by parameter definitions that are not located in "body". -// -// For more information: http://goo.gl/8us55a#items-object -type Items struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible -} - -// NewItems creates a new instance of items -func NewItems() *Items { - return &Items{} -} - -// Typed a fluent builder method for the type of item -func (i *Items) Typed(tpe, format string) *Items { - i.Type = tpe - i.Format = format - return i -} - -// AsNullable flags this schema as nullable. 
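The Items builder methods in this file chain into a single expression; a small sketch using NewItems, Typed, CollectionOf, WithMinLength, WithMinItems, and UniqueValues (the constraints chosen are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// An array of non-empty strings, serialized as CSV, unique, at least one element.
	arr := spec.NewItems().
		CollectionOf(spec.NewItems().Typed("string", "").WithMinLength(1), "csv").
		WithMinItems(1).
		UniqueValues()

	b, err := json.Marshal(arr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}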
-func (i *Items) AsNullable() *Items { - i.Nullable = true - return i -} - -// CollectionOf a fluent builder method for an array item -func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = jsonArray - i.Items = items - i.CollectionFormat = format - return i -} - -// WithDefault sets the default value on this item -func (i *Items) WithDefault(defaultValue interface{}) *Items { - i.Default = defaultValue - return i -} - -// WithMaxLength sets a max length value -func (i *Items) WithMaxLength(max int64) *Items { - i.MaxLength = &max - return i -} - -// WithMinLength sets a min length value -func (i *Items) WithMinLength(min int64) *Items { - i.MinLength = &min - return i -} - -// WithPattern sets a pattern value -func (i *Items) WithPattern(pattern string) *Items { - i.Pattern = pattern - return i -} - -// WithMultipleOf sets a multiple of value -func (i *Items) WithMultipleOf(number float64) *Items { - i.MultipleOf = &number - return i -} - -// WithMaximum sets a maximum number value -func (i *Items) WithMaximum(max float64, exclusive bool) *Items { - i.Maximum = &max - i.ExclusiveMaximum = exclusive - return i -} - -// WithMinimum sets a minimum number value -func (i *Items) WithMinimum(min float64, exclusive bool) *Items { - i.Minimum = &min - i.ExclusiveMinimum = exclusive - return i -} - -// WithEnum sets a the enum values (replace) -func (i *Items) WithEnum(values ...interface{}) *Items { - i.Enum = append([]interface{}{}, values...) - return i -} - -// WithMaxItems sets the max items -func (i *Items) WithMaxItems(size int64) *Items { - i.MaxItems = &size - return i -} - -// WithMinItems sets the min items -func (i *Items) WithMinItems(size int64) *Items { - i.MinItems = &size - return i -} - -// UniqueValues dictates that this array can only have unique items -func (i *Items) UniqueValues() *Items { - i.UniqueItems = true - return i -} - -// AllowDuplicates this array can have duplicates -func (i *Items) AllowDuplicates() *Items { - i.UniqueItems = false - return i -} - -// WithValidations is a fluent method to set Items validations -func (i *Items) WithValidations(val CommonValidations) *Items { - i.SetValidations(SchemaValidations{CommonValidations: val}) - return i -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (i *Items) UnmarshalJSON(data []byte) error { - var validations CommonValidations - if err := json.Unmarshal(data, &validations); err != nil { - return err - } - var ref Refable - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - var simpleSchema SimpleSchema - if err := json.Unmarshal(data, &simpleSchema); err != nil { - return err - } - var vendorExtensible VendorExtensible - if err := json.Unmarshal(data, &vendorExtensible); err != nil { - return err - } - i.Refable = ref - i.CommonValidations = validations - i.SimpleSchema = simpleSchema - i.VendorExtensible = vendorExtensible - return nil -} - -// MarshalJSON converts this items object to JSON -func (i Items) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(i.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b4, b3, b1, b2), nil -} - -// JSONLookup look up a value by the json property name -func (i Items) JSONLookup(token string) (interface{}, error) { - if 
token == jsonRef { - return &i.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go deleted file mode 100644 index b42f8036..00000000 --- a/vendor/github.com/go-openapi/spec/license.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// License information for the exposed API. -// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - LicenseProps - VendorExtensible -} - -// LicenseProps holds the properties of a License object -type LicenseProps struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} - -// UnmarshalJSON hydrates License from json -func (l *License) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &l.LicenseProps); err != nil { - return err - } - return json.Unmarshal(data, &l.VendorExtensible) -} - -// MarshalJSON produces License as json -func (l License) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(l.LicenseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(l.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go deleted file mode 100644 index e8b60099..00000000 --- a/vendor/github.com/go-openapi/spec/normalizer.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "path" - "strings" -) - -const fileScheme = "file" - -// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. -// -// NOTE(windows): there is a tolerance over the strict URI format on windows. -// -// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like -// 'C:\Path\file.Yaml'. 
-// -// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path: -// 'file:///c:/path/file.yaml' -// -// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or -// 'file:///c:\folder\File.json'. -// -// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair" -// is attempted. -// -// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()). -func normalizeURI(refPath, base string) string { - refURL, err := parseURL(refPath) - if err != nil { - specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err) - refURL, refPath = repairURI(refPath) - } - - fixWindowsURI(refURL, refPath) // noop on non-windows OS - - refURL.Path = path.Clean(refURL.Path) - if refURL.Path == "." { - refURL.Path = "" - } - - r := MustCreateRef(refURL.String()) - if r.IsCanonical() { - return refURL.String() - } - - baseURL, _ := parseURL(base) - if path.IsAbs(refURL.Path) { - baseURL.Path = refURL.Path - } else if refURL.Path != "" { - baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) - } - // copying fragment from ref to base - baseURL.Fragment = refURL.Fragment - - return baseURL.String() -} - -// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document. -// -// When calling this, we assume that: -// * $ref is a canonical URI -// * originalRelativeBase is a canonical URI -// -// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected. -// In this case, expansion stops and normally renders the internal canonical $ref. -// -// This internal $ref is eventually rebased to the original RelativeBase used for the expansion. -// -// There is a special case for schemas that are anchored with an "id": -// in that case, the rebasing is performed // against the id only if this is an anchor for the initial root document. -// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing. -func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { - debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id) - - if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { - // short circuit: $ref to current doc - return *ref - } - - if id != "" { - idBaseURL, err := parseURL(id) - if err == nil { // if the schema id is not usable as a URI, ignore it - if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchaged (do not want $ref: "") - // $ref relative to the ID of the schema in the root document - return ref - } - } - } - - originalRelativeBaseURL, _ := parseURL(originalRelativeBase) - - r, _ := rebase(ref, originalRelativeBaseURL, false) - - return r -} - -func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { - var newBase url.URL - - u := ref.GetURL() - - if u.Scheme != v.Scheme || u.Host != v.Host { - return *ref, false - } - - docPath := v.Path - v.Path = path.Dir(v.Path) - - if v.Path == "." 
{ - v.Path = "" - } else if !strings.HasSuffix(v.Path, "/") { - v.Path += "/" - } - - newBase.Fragment = u.Fragment - - if strings.HasPrefix(u.Path, docPath) { - newBase.Path = strings.TrimPrefix(u.Path, docPath) - } else { - newBase.Path = strings.TrimPrefix(u.Path, v.Path) - } - - if notEqual && newBase.Path == "" && newBase.Fragment == "" { - // do not want rebasing to end up in an empty $ref - return *ref, false - } - - if path.IsAbs(newBase.Path) { - // whenever we end up with an absolute path, specify the scheme and host - newBase.Scheme = v.Scheme - newBase.Host = v.Host - } - - return MustCreateRef(newBase.String()), true -} - -// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor -func normalizeRef(ref *Ref, relativeBase string) *Ref { - r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) - return &r -} - -// normalizeBase performs a normalization of the input base path. -// -// This always yields a canonical URI (absolute), usable for the document cache. -// -// It ensures that all further internal work on basePath may safely assume -// a non-empty, cross-platform, canonical URI (i.e. absolute). -// -// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this -// in a file:// URL with lower cased drive letter and path. -// -// See also: https://en.wikipedia.org/wiki/File_URI_scheme -func normalizeBase(in string) string { - u, err := parseURL(in) - if err != nil { - specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) - u, in = repairURI(in) - } - - u.Fragment = "" // any fragment in the base is irrelevant - - fixWindowsURI(u, in) // noop on non-windows OS - - u.Path = path.Clean(u.Path) - if u.Path == "." { // empty after Clean() - u.Path = "" - } - - if u.Scheme != "" { - if path.IsAbs(u.Path) || u.Scheme != fileScheme { - // this is absolute or explicitly not a local file: we're good - return u.String() - } - } - - // no scheme or file scheme with relative path: assume file and make it absolute - // enforce scheme file://... with absolute path. - // - // If the input path is relative, we anchor the path to the current working directory. - // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json - - u.Scheme = fileScheme - u.Path = absPath(u.Path) // platform-dependent - u.RawQuery = "" // any query component is irrelevant for a base - return u.String() -} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go deleted file mode 100644 index f19f1a8f..00000000 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build !windows -// +build !windows - -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "path/filepath" -) - -// absPath makes a file path absolute and compatible with a URI path component. 
-// -// The parameter must be a path, not an URI. -func absPath(in string) string { - anchored, err := filepath.Abs(in) - if err != nil { - specLogger.Printf("warning: could not resolve current working directory: %v", err) - return in - } - return anchored -} - -func repairURI(in string) (*url.URL, string) { - u, _ := parseURL("") - debugLog("repaired URI: original: %q, repaired: %q", in, "") - return u, "" -} - -func fixWindowsURI(_ *url.URL, _ string) { -} diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go deleted file mode 100644 index a66c532d..00000000 --- a/vendor/github.com/go-openapi/spec/normalizer_windows.go +++ /dev/null @@ -1,154 +0,0 @@ -// -build windows - -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "os" - "path" - "path/filepath" - "strings" -) - -// absPath makes a file path absolute and compatible with a URI path component -// -// The parameter must be a path, not an URI. -func absPath(in string) string { - // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. - // See https://github.com/golang/go/issues/24441 - if in == "" { - in = "." - } - - anchored, err := filepath.Abs(in) - if err != nil { - specLogger.Printf("warning: could not resolve current working directory: %v", err) - return in - } - - pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) - if !strings.HasPrefix(pth, "/") { - pth = "/" + pth - } - - return path.Clean(pth) -} - -// repairURI tolerates invalid file URIs with common typos -// such as 'file://E:\folder\file', that break the regular URL parser. -// -// Adopting the same defaults as for unixes (e.g. return an empty path) would -// result into a counter-intuitive result for that case (e.g. E:\folder\file is -// eventually resolved as the current directory). The repair will detect the missing "/". -// -// Note that this only works for the file scheme. -func repairURI(in string) (*url.URL, string) { - const prefix = fileScheme + "://" - if !strings.HasPrefix(in, prefix) { - // giving up: resolve to empty path - u, _ := parseURL("") - - return u, "" - } - - // attempt the repair, stripping the scheme should be sufficient - u, _ := parseURL(strings.TrimPrefix(in, prefix)) - debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) - - return u, u.String() -} - -// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml -// and makes it a canonical URI: file:///c:/base/file.yaml -// -// Catch 22 notes for Windows: -// -// * There may be a drive letter on windows (it is lower-cased) -// * There may be a share UNC, e.g. \\server\folder\data.xml -// * Paths are case insensitive -// * Paths may already contain slashes -// * Paths must be slashed -// -// NOTE: there is no escaping. "/" may be valid separators just like "\". 
-// We don't use ToSlash() (which escapes everything) because windows now also -// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. -func fixWindowsURI(u *url.URL, in string) { - drive := filepath.VolumeName(in) - - if len(drive) > 0 { - if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter - u.Scheme = fileScheme - u.Host = "" - u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) - } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume - // NOTE: the special host@port syntax for UNC is not supported (yet) - u.Scheme = fileScheme - - // this is a modified version of filepath.Dir() to apply on the VolumeName itself - i := len(drive) - 1 - for i >= 0 && !os.IsPathSeparator(drive[i]) { - i-- - } - host := drive[:i] // \\host\share => host - - u.Path = strings.TrimPrefix(u.Path, host) - u.Host = strings.TrimPrefix(host, `\\`) - } - - u.Opaque = "" - u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) - - // ensure we form an absolute path - if !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - - u.Path = path.Clean(u.Path) - - return - } - - if u.Scheme == fileScheme { - // Handle dodgy cases for file://{...} URIs on windows. - // A canonical URI should always be followed by an absolute path. - // - // Examples: - // * file:///folder/file => valid, unchanged - // * file:///c:\folder\file => slashed - // * file:///./folder/file => valid, cleaned to remove the dot - // * file:///.\folder\file => remapped to cwd - // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) - // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) - if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { - // ensure we form an absolute path - u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) - if !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - } - u.Path = strings.ToLower(u.Path) - } - - // NOTE: lower case normalization does not propagate to inner resources, - // generated when rebasing: when joining a relative URI with a file to an absolute base, - // only the base is currently lower-cased. - // - // For now, we assume this is good enough for most use cases - // and try not to generate too many differences - // between the output produced on different platforms. - u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) -} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go deleted file mode 100644 index a69cca88..00000000 --- a/vendor/github.com/go-openapi/spec/operation.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
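For reference, the drive-letter case handled by fixWindowsURI above can be exercised in isolation. A minimal standalone sketch of the same lower-casing and slashing, using only the standard library (toFileURI is a hypothetical helper for illustration, not part of the package, and it skips the UNC-share and dodgy file:// branches):

package main

import (
	"fmt"
	"path"
	"strings"
)

// toFileURI mirrors the drive-letter branch of the deleted fixWindowsURI:
// lower-case the path, flip backslashes to slashes, anchor it with a leading
// slash, and place it under the file scheme. Simplified sketch only.
func toFileURI(winPath string) string {
	p := strings.ReplaceAll(strings.ToLower(winPath), `\`, `/`)
	if !strings.HasPrefix(p, "/") {
		p = "/" + p
	}
	return "file://" + path.Clean(p)
}

func main() {
	fmt.Println(toFileURI(`C:\Base\File.yaml`)) // file:///c:/base/file.yaml
}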
-
-package spec
-
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/json"
-	"sort"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-func init() {
-	gob.Register(map[string]interface{}{})
-	gob.Register([]interface{}{})
-}
-
-// OperationProps describes an operation
-//
-// NOTES:
-// - schemes, when present, must be from [http, https, ws, wss]: see validate
-// - Security is handled as a special case: see MarshalJSON function
-type OperationProps struct {
-	Description  string                 `json:"description,omitempty"`
-	Consumes     []string               `json:"consumes,omitempty"`
-	Produces     []string               `json:"produces,omitempty"`
-	Schemes      []string               `json:"schemes,omitempty"`
-	Tags         []string               `json:"tags,omitempty"`
-	Summary      string                 `json:"summary,omitempty"`
-	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
-	ID           string                 `json:"operationId,omitempty"`
-	Deprecated   bool                   `json:"deprecated,omitempty"`
-	Security     []map[string][]string  `json:"security,omitempty"`
-	Parameters   []Parameter            `json:"parameters,omitempty"`
-	Responses    *Responses             `json:"responses,omitempty"`
-}
-
-// MarshalJSON takes care of serializing operation properties to JSON
-//
-// We use a custom marshaller here to handle a special case related to
-// the Security field. We need to preserve zero length slice
-// while omitting the field when the value is nil/unset.
-func (op OperationProps) MarshalJSON() ([]byte, error) {
-	type Alias OperationProps
-	if op.Security == nil {
-		return json.Marshal(&struct {
-			Security []map[string][]string `json:"security,omitempty"`
-			*Alias
-		}{
-			Security: op.Security,
-			Alias:    (*Alias)(&op),
-		})
-	}
-	return json.Marshal(&struct {
-		Security []map[string][]string `json:"security"`
-		*Alias
-	}{
-		Security: op.Security,
-		Alias:    (*Alias)(&op),
-	})
-}
-
-// Operation describes a single API operation on a path.
-//
-// For more information: http://goo.gl/8us55a#operationObject
-type Operation struct {
-	VendorExtensible
-	OperationProps
-}
-
-// SuccessResponse gets a success response model
-func (o *Operation) SuccessResponse() (*Response, int, bool) {
-	if o.Responses == nil {
-		return nil, 0, false
-	}
-
-	responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses))
-	for k := range o.Responses.StatusCodeResponses {
-		if k >= 200 && k < 300 {
-			responseCodes = append(responseCodes, k)
-		}
-	}
-	if len(responseCodes) > 0 {
-		sort.Ints(responseCodes)
-		v := o.Responses.StatusCodeResponses[responseCodes[0]]
-		return &v, responseCodes[0], true
-	}
-
-	return o.Responses.Default, 0, false
-}
-
-// JSONLookup look up a value by the json property name
-func (o Operation) JSONLookup(token string) (interface{}, error) {
-	if ex, ok := o.Extensions[token]; ok {
-		return &ex, nil
-	}
-	r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
-	return r, err
-}
-
-// UnmarshalJSON hydrates this items instance with the data from JSON
-func (o *Operation) UnmarshalJSON(data []byte) error {
-	if err := json.Unmarshal(data, &o.OperationProps); err != nil {
-		return err
-	}
-	return json.Unmarshal(data, &o.VendorExtensible)
-}
-
-// MarshalJSON converts this items object to JSON
-func (o Operation) MarshalJSON() ([]byte, error) {
-	b1, err := json.Marshal(o.OperationProps)
-	if err != nil {
-		return nil, err
-	}
-	b2, err := json.Marshal(o.VendorExtensible)
-	if err != nil {
-		return nil, err
-	}
-	concated := swag.ConcatJSON(b1, b2)
-	return concated, nil
-}
-
-// NewOperation creates a new operation instance.
-// It expects an ID as a parameter, but passing no ID is also valid.
-func NewOperation(id string) *Operation {
-	op := new(Operation)
-	op.ID = id
-	return op
-}
-
-// WithID sets the ID property on this operation, allows for chaining.
-func (o *Operation) WithID(id string) *Operation {
-	o.ID = id
-	return o
-}
-
-// WithDescription sets the description on this operation, allows for chaining
-func (o *Operation) WithDescription(description string) *Operation {
-	o.Description = description
-	return o
-}
-
-// WithSummary sets the summary on this operation, allows for chaining
-func (o *Operation) WithSummary(summary string) *Operation {
-	o.Summary = summary
-	return o
-}
-
-// WithExternalDocs sets/removes the external docs for/from this operation.
-// When you pass empty strings as params the external documents will be removed.
-// When you pass non-empty strings, those values will be used on the external docs object.
-// So when you pass a non-empty description, you should also pass the url and vice versa.
-func (o *Operation) WithExternalDocs(description, url string) *Operation {
-	if description == "" && url == "" {
-		o.ExternalDocs = nil
-		return o
-	}
-
-	if o.ExternalDocs == nil {
-		o.ExternalDocs = &ExternalDocumentation{}
-	}
-	o.ExternalDocs.Description = description
-	o.ExternalDocs.URL = url
-	return o
-}
-
-// Deprecate marks the operation as deprecated
-func (o *Operation) Deprecate() *Operation {
-	o.Deprecated = true
-	return o
-}
-
-// Undeprecate marks the operation as not deprecated
-func (o *Operation) Undeprecate() *Operation {
-	o.Deprecated = false
-	return o
-}
-
-// WithConsumes adds media types for incoming body values
-func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
-	o.Consumes = append(o.Consumes, mediaTypes...)
-	return o
-}
-
-// WithProduces adds media types for outgoing body values
-func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
-	o.Produces = append(o.Produces, mediaTypes...)
-	return o
-}
-
-// WithTags adds tags for this operation
-func (o *Operation) WithTags(tags ...string) *Operation {
-	o.Tags = append(o.Tags, tags...)
-	return o
-}
-
-// AddParam adds a parameter to this operation; when a parameter for that location
-// and with that name already exists, it will be replaced
-func (o *Operation) AddParam(param *Parameter) *Operation {
-	if param == nil {
-		return o
-	}
-
-	for i, p := range o.Parameters {
-		if p.Name == param.Name && p.In == param.In {
-			params := make([]Parameter, 0, len(o.Parameters)+1)
-			params = append(params, o.Parameters[:i]...)
-			params = append(params, *param)
-			params = append(params, o.Parameters[i+1:]...)
-			o.Parameters = params
-
-			return o
-		}
-	}
-
-	o.Parameters = append(o.Parameters, *param)
-	return o
-}
-
-// RemoveParam removes a parameter from the operation
-func (o *Operation) RemoveParam(name, in string) *Operation {
-	for i, p := range o.Parameters {
-		if p.Name == name && p.In == in {
-			o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
-			return o
-		}
-	}
-	return o
-}
-
-// SecuredWith adds a security scope to this operation.
-func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
-	o.Security = append(o.Security, map[string][]string{name: scopes})
-	return o
-}
-
-// WithDefaultResponse adds a default response to the operation.
-// Passing a nil value will remove the response -func (o *Operation) WithDefaultResponse(response *Response) *Operation { - return o.RespondsWith(0, response) -} - -// RespondsWith adds a status code response to the operation. -// When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation -func (o *Operation) RespondsWith(code int, response *Response) *Operation { - if o.Responses == nil { - o.Responses = new(Responses) - } - if code == 0 { - o.Responses.Default = response - return o - } - if response == nil { - delete(o.Responses.StatusCodeResponses, code) - return o - } - if o.Responses.StatusCodeResponses == nil { - o.Responses.StatusCodeResponses = make(map[int]Response) - } - o.Responses.StatusCodeResponses[code] = *response - return o -} - -type opsAlias OperationProps - -type gobAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *opsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (o Operation) GobEncode() ([]byte, error) { - raw := struct { - Ext VendorExtensible - Props OperationProps - }{ - Ext: o.VendorExtensible, - Props: o.OperationProps, - } - var b bytes.Buffer - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (o *Operation) GobDecode(b []byte) error { - var raw struct { - Ext VendorExtensible - Props OperationProps - } - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - o.VendorExtensible = raw.Ext - o.OperationProps = raw.Props - return nil -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (op OperationProps) GobEncode() ([]byte, error) { - raw := gobAlias{ - Alias: (*opsAlias)(&op), - } - - var b bytes.Buffer - if op.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(op.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(op.Security)) - for _, req := range op.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (op *OperationProps) GobDecode(b []byte) error { - var raw gobAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = 
append(v[k], val.List...) - } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *op = *(*OperationProps)(raw.Alias) - return nil -} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go deleted file mode 100644 index bd4f1cdb..00000000 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// QueryParam creates a query parameter -func QueryParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} -} - -// HeaderParam creates a header parameter, this is always required by default -func HeaderParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} -} - -// PathParam creates a path parameter, this is always required -func PathParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} -} - -// BodyParam creates a body parameter -func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} -} - -// FormDataParam creates a body parameter -func FormDataParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} -} - -// FileParam creates a body parameter -func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, - SimpleSchema: SimpleSchema{Type: "file"}} -} - -// SimpleArrayParam creates a param for a simple array (string, int, date etc) -func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, - SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", - Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} -} - -// ParamRef creates a parameter that's a json reference -func ParamRef(uri string) *Parameter { - p := new(Parameter) - p.Ref = MustCreateRef(uri) - return p -} - -// ParamProps describes the specific attributes of an operation parameter -// -// NOTE: -// - Schema is defined when "in" == "body": see validate -// - AllowEmptyValue is allowed where "in" == "query" || "formData" -type ParamProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` -} - -// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). -// -// There are five possible parameter types. 
-// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// - Header - Custom headers that are expected as part of the request. -// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. 
-// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// JSONLookup look up a value by the json property name -func (p Parameter) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.ParamProps, token) - return r, err -} - -// WithDescription a fluent builder method for the description of the parameter -func (p *Parameter) WithDescription(description string) *Parameter { - p.Description = description - return p -} - -// Named a fluent builder method to override the name of the parameter -func (p *Parameter) Named(name string) *Parameter { - p.Name = name - return p -} - -// WithLocation a fluent builder method to override the location of the parameter -func (p *Parameter) WithLocation(in string) *Parameter { - p.In = in - return p -} - -// Typed a fluent builder method for the type of the parameter value -func (p *Parameter) Typed(tpe, format string) *Parameter { - p.Type = tpe - p.Format = format - return p -} - -// CollectionOf a fluent builder method for an array parameter -func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = jsonArray - p.Items = items - p.CollectionFormat = format - return p -} - -// WithDefault sets the default value on this parameter -func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { - p.AsOptional() // with default implies optional - p.Default = defaultValue - return p -} - -// AllowsEmptyValues flags this parameter as being ok with empty values -func (p *Parameter) AllowsEmptyValues() *Parameter { - p.AllowEmptyValue = true - return p -} - -// NoEmptyValues flags this parameter as not liking empty values -func (p *Parameter) NoEmptyValues() *Parameter { - p.AllowEmptyValue = false - return p -} - -// AsOptional flags this parameter as optional -func (p *Parameter) AsOptional() *Parameter { - p.Required = false - return p -} - -// AsRequired flags this parameter as required -func (p *Parameter) AsRequired() *Parameter { - if p.Default != nil { // with a default required makes no sense - return p - } - p.Required = true - return p -} - -// WithMaxLength sets a max length value -func (p *Parameter) WithMaxLength(max int64) *Parameter { - p.MaxLength = &max - return p -} - -// WithMinLength sets a min length value -func (p *Parameter) WithMinLength(min int64) *Parameter { - p.MinLength = &min - return p -} - -// WithPattern sets a pattern value -func (p *Parameter) WithPattern(pattern string) *Parameter { - p.Pattern = pattern - return p -} - -// WithMultipleOf sets a multiple of value -func (p *Parameter) WithMultipleOf(number float64) *Parameter { - p.MultipleOf = &number - return p -} - -// WithMaximum sets a maximum number value -func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { - p.Maximum = &max - p.ExclusiveMaximum = exclusive - return p -} - -// WithMinimum sets a minimum number value -func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { - p.Minimum = &min - p.ExclusiveMinimum = exclusive - return p -} - -// WithEnum sets a the enum values (replace) -func (p *Parameter) WithEnum(values ...interface{}) *Parameter { - p.Enum = append([]interface{}{}, values...) - return p -} - -// WithMaxItems sets the max items -func (p *Parameter) WithMaxItems(size int64) *Parameter { - p.MaxItems = &size - return p -} - -// WithMinItems sets the min items -func (p *Parameter) WithMinItems(size int64) *Parameter { - p.MinItems = &size - return p -} - -// UniqueValues dictates that this array can only have unique items -func (p *Parameter) UniqueValues() *Parameter { - p.UniqueItems = true - return p -} - -// AllowDuplicates this array can have duplicates -func (p *Parameter) AllowDuplicates() *Parameter { - p.UniqueItems = false - return p -} - -// WithValidations is a fluent method to set parameter validations -func (p *Parameter) WithValidations(val CommonValidations) *Parameter { - p.SetValidations(SchemaValidations{CommonValidations: val}) - return p -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.ParamProps) -} - -// MarshalJSON converts this items object to JSON -func (p Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.ParamProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2, b4, b5), nil -} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go deleted file mode 100644 index 68fc8e90..00000000 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
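Parameter marshals by concatenating the JSON of its five embedded structs via swag.ConcatJSON, and the fluent builders above chain naturally since each returns the receiver. A short usage sketch, assuming the go-openapi/spec module remains importable from upstream even though it is dropped from this vendor tree:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Build a typed query parameter with validations using the fluent API.
	p := spec.QueryParam("limit").
		Typed("integer", "int32").
		WithDefault(20). // WithDefault calls AsOptional: a default implies optional
		WithMinimum(1, false).
		WithMaximum(100, false)

	// Parameter.MarshalJSON concatenates the JSON of CommonValidations,
	// SimpleSchema, Refable, VendorExtensible and ParamProps into one object.
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}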
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// PathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. -// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// JSONLookup look up a value by the json property name -func (p PathItem) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.PathItemProps) -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go deleted file mode 100644 index 9dc82a29..00000000 --- a/vendor/github.com/go-openapi/spec/paths.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// JSONLookup look up a value by the json property name -func (p Paths) JSONLookup(token string) (interface{}, error) { - if pi, ok := p.Paths[token]; ok { - return &pi, nil - } - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go deleted file mode 100644 index 91d2435f..00000000 --- a/vendor/github.com/go-openapi/spec/properties.go +++ /dev/null @@ -1,91 +0,0 @@ -package spec - -import ( - "bytes" - "encoding/json" - "reflect" - "sort" -) - -// OrderSchemaItem holds a named schema (e.g. from a property of an object) -type OrderSchemaItem struct { - Name string - Schema -} - -// OrderSchemaItems is a sortable slice of named schemas. -// The ordering is defined by the x-order schema extension. -type OrderSchemaItems []OrderSchemaItem - -// MarshalJSON produces a json object with keys defined by the name schemas -// of the OrderSchemaItems slice, keeping the original order of the slice. 
-func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { - buf := bytes.NewBuffer(nil) - buf.WriteString("{") - for i := range items { - if i > 0 { - buf.WriteString(",") - } - buf.WriteString("\"") - buf.WriteString(items[i].Name) - buf.WriteString("\":") - bs, err := json.Marshal(&items[i].Schema) - if err != nil { - return nil, err - } - buf.Write(bs) - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -func (items OrderSchemaItems) Len() int { return len(items) } -func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } -func (items OrderSchemaItems) Less(i, j int) (ret bool) { - ii, oki := items[i].Extensions.GetInt("x-order") - ij, okj := items[j].Extensions.GetInt("x-order") - if oki { - if okj { - defer func() { - if err := recover(); err != nil { - defer func() { - if err = recover(); err != nil { - ret = items[i].Name < items[j].Name - } - }() - ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() - } - }() - return ii < ij - } - return true - } else if okj { - return false - } - return items[i].Name < items[j].Name -} - -// SchemaProperties is a map representing the properties of a Schema object. -// It knows how to transform its keys into an ordered slice. -type SchemaProperties map[string]Schema - -// ToOrderedSchemaItems transforms the map of properties into a sortable slice -func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { - items := make(OrderSchemaItems, 0, len(properties)) - for k, v := range properties { - items = append(items, OrderSchemaItem{ - Name: k, - Schema: v, - }) - } - sort.Sort(items) - return items -} - -// MarshalJSON produces properties as json, keeping their order. -func (properties SchemaProperties) MarshalJSON() ([]byte, error) { - if properties == nil { - return []byte("null"), nil - } - return json.Marshal(properties.ToOrderedSchemaItems()) -} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go deleted file mode 100644 index b0ef9bd9..00000000 --- a/vendor/github.com/go-openapi/spec/ref.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
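The x-order comparison in OrderSchemaItems.Less above prefers integer comparison, falls back (via the nested recovers) to a string comparison when the extension value is not an int, and finally falls back to the property name; ordered items always sort before unordered ones. A standalone sketch of that precedence with the extension machinery stubbed out (item is a hypothetical stand-in, and the non-int recover fallback is omitted):

package main

import (
	"fmt"
	"sort"
)

// item stands in for OrderSchemaItem: a property name plus an optional
// integer x-order extension (ok reports whether the extension is present).
type item struct {
	name  string
	order int
	ok    bool
}

func main() {
	props := []item{
		{name: "zeta"},                      // no x-order: sorts by name, after ordered items
		{name: "beta", order: 2, ok: true},  // explicit x-order
		{name: "alpha", order: 1, ok: true}, // explicit x-order
	}
	sort.Slice(props, func(i, j int) bool {
		switch {
		case props[i].ok && props[j].ok:
			return props[i].order < props[j].order
		case props[i].ok:
			return true // ordered before unordered
		case props[j].ok:
			return false
		default:
			return props[i].name < props[j].name
		}
	})
	fmt.Println(props) // [{alpha 1 true} {beta 2 true} {zeta 0 false}]
}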
-
-package spec
-
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/json"
-	"net/http"
-	"os"
-	"path/filepath"
-
-	"github.com/go-openapi/jsonreference"
-)
-
-// Refable is a struct for things that accept a $ref property
-type Refable struct {
-	Ref Ref
-}
-
-// MarshalJSON marshals the ref to json
-func (r Refable) MarshalJSON() ([]byte, error) {
-	return r.Ref.MarshalJSON()
-}
-
-// UnmarshalJSON unmarshals the ref from json
-func (r *Refable) UnmarshalJSON(d []byte) error {
-	return json.Unmarshal(d, &r.Ref)
-}
-
-// Ref represents a json reference that is potentially resolved
-type Ref struct {
-	jsonreference.Ref
-}
-
-// RemoteURI gets the remote uri part of the ref
-func (r *Ref) RemoteURI() string {
-	if r.String() == "" {
-		return ""
-	}
-
-	u := *r.GetURL()
-	u.Fragment = ""
-	return u.String()
-}
-
-// IsValidURI returns true when the url the ref points to can be found
-func (r *Ref) IsValidURI(basepaths ...string) bool {
-	if r.String() == "" {
-		return true
-	}
-
-	v := r.RemoteURI()
-	if v == "" {
-		return true
-	}
-
-	if r.HasFullURL {
-		//nolint:noctx,gosec
-		rr, err := http.Get(v)
-		if err != nil {
-			return false
-		}
-		defer rr.Body.Close()
-
-		return rr.StatusCode/100 == 2
-	}
-
-	if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
-		return false
-	}
-
-	// check for local file
-	pth := v
-	if r.HasURLPathOnly {
-		base := "."
-		if len(basepaths) > 0 {
-			base = filepath.Dir(filepath.Join(basepaths...))
-		}
-		p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
-		if e != nil {
-			return false
-		}
-		pth = p
-	}
-
-	fi, err := os.Stat(filepath.ToSlash(pth))
-	if err != nil {
-		return false
-	}
-
-	return !fi.IsDir()
-}
-
-// Inherits creates a new reference from a parent and a child.
-// If the child cannot inherit from the parent, an error is returned
-func (r *Ref) Inherits(child Ref) (*Ref, error) {
-	ref, err := r.Ref.Inherits(child.Ref)
-	if err != nil {
-		return nil, err
-	}
-	return &Ref{Ref: *ref}, nil
-}
-
-// NewRef creates a new instance of a ref object.
-// It returns an error when the reference uri is an invalid uri
-func NewRef(refURI string) (Ref, error) {
-	ref, err := jsonreference.New(refURI)
-	if err != nil {
-		return Ref{}, err
-	}
-	return Ref{Ref: ref}, nil
-}
-
-// MustCreateRef creates a ref object but panics when refURI is invalid.
-// Use the NewRef method for a version that returns an error.
-func MustCreateRef(refURI string) Ref { - return Ref{Ref: jsonreference.MustCreateRef(refURI)} -} - -// MarshalJSON marshals this ref into a JSON object -func (r Ref) MarshalJSON() ([]byte, error) { - str := r.String() - if str == "" { - if r.IsRoot() { - return []byte(`{"$ref":""}`), nil - } - return []byte("{}"), nil - } - v := map[string]interface{}{"$ref": str} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshals this ref from a JSON object -func (r *Ref) UnmarshalJSON(d []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(d, &v); err != nil { - return err - } - return r.fromMap(v) -} - -// GobEncode provides a safe gob encoder for Ref -func (r Ref) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw, err := r.MarshalJSON() - if err != nil { - return nil, err - } - err = gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Ref -func (r *Ref) GobDecode(b []byte) error { - var raw []byte - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - return json.Unmarshal(raw, r) -} - -func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go deleted file mode 100644 index 47d1ee13..00000000 --- a/vendor/github.com/go-openapi/spec/resolver.go +++ /dev/null @@ -1,127 +0,0 @@ -package spec - -import ( - "fmt" - - "github.com/go-openapi/swag" -) - -func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { - options = optionsOrDefault(options) - resolver := defaultSchemaLoader(root, options, nil, nil) - - if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { - return err - } - - return nil -} - -// ResolveRefWithBase resolves a reference against a context root with preservation of base path -func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { - result := new(Schema) - - if err := resolveAnyWithBase(root, ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveRef resolves a reference for a schema against a context root -// ref is guaranteed to be in root (no need to go to external files) -// -// ResolveRef is ONLY called from the code generation module -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - res, _, err := ref.GetPointer().Get(root) - if err != nil { - return nil, err - } - - switch sch := res.(type) { - case Schema: - return &sch, nil - case *Schema: - return sch, nil - case map[string]interface{}: - newSch := new(Schema) - if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { - return nil, err - } - return newSch, nil - default: - return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) - } -} - -// ResolveParameterWithBase resolves a parameter reference against a context root and base path -func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { - result := new(Parameter) - - if err := resolveAnyWithBase(root, &ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveParameter resolves a parameter reference against a context root -func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) {
-	return ResolveParameterWithBase(root, ref, nil)
-}
-
-// ResolveResponseWithBase resolves a response reference against a context root and base path
-func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) {
-	result := new(Response)
-
-	err := resolveAnyWithBase(root, &ref, result, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return result, nil
-}
-
-// ResolveResponse resolves a response reference against a context root
-func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
-	return ResolveResponseWithBase(root, ref, nil)
-}
-
-// ResolvePathItemWithBase resolves a path item against a context root and base path
-func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
-	result := new(PathItem)
-
-	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
-		return nil, err
-	}
-
-	return result, nil
-}
-
-// ResolvePathItem resolves a path item against a context root and base path
-//
-// Deprecated: use ResolvePathItemWithBase instead
-func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
-	return ResolvePathItemWithBase(root, ref, options)
-}
-
-// ResolveItemsWithBase resolves a parameter items reference against a context root and base path.
-//
-// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
-// Similarly, $ref is forbidden in response headers.
-func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
-	result := new(Items)
-
-	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
-		return nil, err
-	}
-
-	return result, nil
-}
-
-// ResolveItems resolves a parameter items reference against a context root and base path.
-//
-// Deprecated: use ResolveItemsWithBase instead
-func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
-	return ResolveItemsWithBase(root, ref, options)
-}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
deleted file mode 100644
index 0340b60d..00000000
--- a/vendor/github.com/go-openapi/spec/response.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spec
-
-import (
-	"encoding/json"
-
-	"github.com/go-openapi/jsonpointer"
-	"github.com/go-openapi/swag"
-)
-
-// ResponseProps properties specific to a response
-type ResponseProps struct {
-	Description string                 `json:"description"`
-	Schema      *Schema                `json:"schema,omitempty"`
-	Headers     map[string]Header      `json:"headers,omitempty"`
-	Examples    map[string]interface{} `json:"examples,omitempty"`
-}
-
-// Response describes a single response from an API Operation.
-// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps - VendorExtensible -} - -// JSONLookup look up a value by the json property name -func (r Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &r.Ref, nil - } - ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) - return ptr, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return json.Unmarshal(data, &r.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - var ( - b1 []byte - err error - ) - - if r.Ref.String() == "" { - // when there is no $ref, empty description is rendered as an empty string - b1, err = json.Marshal(r.ResponseProps) - } else { - // when there is $ref inside the schema, description should be omitempty-ied - b1, err = json.Marshal(struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` - }{ - Description: r.ResponseProps.Description, - Schema: r.ResponseProps.Schema, - Examples: r.ResponseProps.Examples, - }) - } - if err != nil { - return nil, err - } - - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} - -// WithDescription sets the description on this response, allows for chaining -func (r *Response) WithDescription(description string) *Response { - r.Description = description - return r -} - -// WithSchema sets the schema on this response, allows for chaining. 
-// Passing a nil argument removes the schema from this response
-func (r *Response) WithSchema(schema *Schema) *Response {
-	r.Schema = schema
-	return r
-}
-
-// AddHeader adds a header to this response
-func (r *Response) AddHeader(name string, header *Header) *Response {
-	if header == nil {
-		return r.RemoveHeader(name)
-	}
-	if r.Headers == nil {
-		r.Headers = make(map[string]Header)
-	}
-	r.Headers[name] = *header
-	return r
-}
-
-// RemoveHeader removes a header from this response
-func (r *Response) RemoveHeader(name string) *Response {
-	delete(r.Headers, name)
-	return r
-}
-
-// AddExample adds an example to this response
-func (r *Response) AddExample(mediaType string, example interface{}) *Response {
-	if r.Examples == nil {
-		r.Examples = make(map[string]interface{})
-	}
-	r.Examples[mediaType] = example
-	return r
-}
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
deleted file mode 100644
index 16c3076f..00000000
--- a/vendor/github.com/go-openapi/spec/responses.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spec
-
-import (
-	"encoding/json"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-
-	"github.com/go-openapi/swag"
-)
-
-// Responses is a container for the expected responses of an operation.
-// The container maps an HTTP response code to the expected response.
-// It is not expected from the documentation to necessarily cover all possible HTTP response codes,
-// since they may not be known in advance. However, it is expected from the documentation to cover
-// a successful operation response and any known errors.
-//
-// The `default` can be used as a default response object for all HTTP codes that are not covered
-// individually by the specification.
-//
-// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
-// for a successful operation call.
-// -// For more information: http://goo.gl/8us55a#responsesObject -type Responses struct { - VendorExtensible - ResponsesProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (r Responses) JSONLookup(token string) (interface{}, error) { - if token == "default" { - return r.Default, nil - } - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if i, err := strconv.Atoi(token); err == nil { - if scr, ok := r.StatusCodeResponses[i]; ok { - return scr, nil - } - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { - r.ResponsesProps = ResponsesProps{} - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// ResponsesProps describes all responses for an operation. -// It tells what is the default response and maps all responses with a -// HTTP status code. -type ResponsesProps struct { - Default *Response - StatusCodeResponses map[int]Response -} - -// MarshalJSON marshals responses as JSON -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]Response{} - if r.Default != nil { - toser["default"] = *r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -// UnmarshalJSON unmarshals responses from JSON -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - if v, ok := res["default"]; ok { - var defaultRes Response - if err := json.Unmarshal(v, &defaultRes); err != nil { - return err - } - r.Default = &defaultRes - delete(res, "default") - } - for k, v := range res { - if !strings.HasPrefix(k, "x-") { - var statusCodeResp Response - if err := json.Unmarshal(v, &statusCodeResp); err != nil { - return err - } - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} - } - r.StatusCodeResponses[nk] = statusCodeResp - } - } - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go deleted file mode 100644 index 4e9be857..00000000 --- a/vendor/github.com/go-openapi/spec/schema.go +++ /dev/null @@ -1,645 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
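ResponsesProps flattens into a single JSON object: the default response goes under the "default" key and each status-code response under its stringified code, as the MarshalJSON/UnmarshalJSON pair above shows. A short sketch, again assuming the upstream go-openapi/spec module:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ok := spec.NewResponse().WithDescription("resource listed")
	oops := spec.NewResponse().WithDescription("unexpected error")

	// Default and StatusCodeResponses end up as sibling keys of one object.
	r := spec.Responses{
		ResponsesProps: spec.ResponsesProps{
			Default:             oops,
			StatusCodeResponses: map[int]spec.Response{200: *ok},
		},
	}
	b, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"200":{...},"default":{...}}
}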
- -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format -func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// UnmarshalJSON 
unmarshal this from JSON -func (r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - return r.fromMap(v) -} - -func (r *SchemaURL) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := parseURL(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// SchemaProps describes a JSON schema (draft 4) -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-"` - Schema SchemaURL `json:"-"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema `json:"oneOf,omitempty"` - AnyOf []Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties SchemaProperties `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties SchemaProperties `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - XML *XMLObject `json:"xml,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
-// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s Schema) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - if ex, ok := s.ExtraProps[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { - return r, err - } - r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) - return r, err -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = format - } - return s -} - -// AsNullable flags this schema as nullable. -func (s *Schema) AsNullable() *Schema { - s.Nullable = true - return s -} - -// CollectionOf a fluent builder method for an array parameter -func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{jsonArray} - s.Items = &SchemaOrArray{Schema: &items} - return s -} - -// WithDefault sets the default value on this parameter -func (s *Schema) WithDefault(defaultValue interface{}) *Schema { - s.Default = defaultValue - return s -} - -// WithRequired flags this parameter as required -func (s *Schema) WithRequired(items ...string) *Schema { - s.Required = items - return s -} - -// AddRequired adds field names to the required properties array -func (s *Schema) AddRequired(items ...string) *Schema { - s.Required = append(s.Required, items...) 
- return s
-}
-
-// WithMaxLength sets a max length value
-func (s *Schema) WithMaxLength(max int64) *Schema {
- s.MaxLength = &max
- return s
-}
-
-// WithMinLength sets a min length value
-func (s *Schema) WithMinLength(min int64) *Schema {
- s.MinLength = &min
- return s
-}
-
-// WithPattern sets a pattern value
-func (s *Schema) WithPattern(pattern string) *Schema {
- s.Pattern = pattern
- return s
-}
-
-// WithMultipleOf sets a multiple of value
-func (s *Schema) WithMultipleOf(number float64) *Schema {
- s.MultipleOf = &number
- return s
-}
-
-// WithMaximum sets a maximum number value
-func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
- s.Maximum = &max
- s.ExclusiveMaximum = exclusive
- return s
-}
-
-// WithMinimum sets a minimum number value
-func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
- s.Minimum = &min
- s.ExclusiveMinimum = exclusive
- return s
-}
-
-// WithEnum sets the enum values (replacing any existing values)
-func (s *Schema) WithEnum(values ...interface{}) *Schema {
- s.Enum = append([]interface{}{}, values...)
- return s
-}
-
-// WithMaxItems sets the max items
-func (s *Schema) WithMaxItems(size int64) *Schema {
- s.MaxItems = &size
- return s
-}
-
-// WithMinItems sets the min items
-func (s *Schema) WithMinItems(size int64) *Schema {
- s.MinItems = &size
- return s
-}
-
-// UniqueValues dictates that this array can only have unique items
-func (s *Schema) UniqueValues() *Schema {
- s.UniqueItems = true
- return s
-}
-
-// AllowDuplicates flags this array as allowing duplicate items
-func (s *Schema) AllowDuplicates() *Schema {
- s.UniqueItems = false
- return s
-}
-
-// AddToAllOf adds a schema to the allOf property
-func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
- s.AllOf = append(s.AllOf, schemas...)
- return s
-}
-
-// WithDiscriminator sets the name of the discriminator field
-func (s *Schema) WithDiscriminator(discriminator string) *Schema {
- s.Discriminator = discriminator
- return s
-}
-
-// AsReadOnly flags this schema as readonly
-func (s *Schema) AsReadOnly() *Schema {
- s.ReadOnly = true
- return s
-}
-
-// AsWritable flags this schema as writeable (not read-only)
-func (s *Schema) AsWritable() *Schema {
- s.ReadOnly = false
- return s
-}
-
-// WithExample sets the example for this schema
-func (s *Schema) WithExample(example interface{}) *Schema {
- s.Example = example
- return s
-}
-
-// WithExternalDocs sets/removes the external docs for/from this schema.
-// When you pass empty strings as params, the external documents are removed.
-// When you pass a non-empty string for either value, those values are set on the external docs object.
-// So when you pass a non-empty description, you should also pass the url, and vice versa.
-func (s *Schema) WithExternalDocs(description, url string) *Schema {
- if description == "" && url == "" {
- s.ExternalDocs = nil
- return s
- }
-
- if s.ExternalDocs == nil {
- s.ExternalDocs = &ExternalDocumentation{}
- }
- s.ExternalDocs.Description = description
- s.ExternalDocs.URL = url
- return s
-}
-
-// WithXMLName sets the xml name for the object
-func (s *Schema) WithXMLName(name string) *Schema {
- if s.XML == nil {
- s.XML = new(XMLObject)
- }
- s.XML.Name = name
- return s
-}
-
-// WithXMLNamespace sets the xml namespace for the object
-func (s *Schema) WithXMLNamespace(namespace string) *Schema {
- if s.XML == nil {
- s.XML = new(XMLObject)
- }
- s.XML.Namespace = namespace
- return s
-}
-
-// WithXMLPrefix sets the xml prefix for the object
-func (s *Schema) WithXMLPrefix(prefix string) *Schema {
- if s.XML == nil {
- s.XML = new(XMLObject)
- }
- s.XML.Prefix = prefix
- return s
-}
-
-// AsXMLAttribute flags this object as an xml attribute
-func (s *Schema) AsXMLAttribute() *Schema {
- if s.XML == nil {
- s.XML = new(XMLObject)
- }
- s.XML.Attribute = true
- return s
-}
-
-// AsXMLElement flags this object as an xml node
-func (s *Schema) AsXMLElement() *Schema {
- if s.XML == nil {
- s.XML = new(XMLObject)
- }
- s.XML.Attribute = false
- return s
-}
-
-// AsWrappedXML flags this object as wrapped; this is mostly useful for array types
-func (s *Schema) AsWrappedXML() *Schema {
- if s.XML == nil {
- s.XML = new(XMLObject)
- }
- s.XML.Wrapped = true
- return s
-}
-
-// AsUnwrappedXML flags this object as not wrapped
-func (s *Schema) AsUnwrappedXML() *Schema {
- if s.XML == nil {
- s.XML = new(XMLObject)
- }
- s.XML.Wrapped = false
- return s
-}
-
-// SetValidations defines all schema validations.
-//
-// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered.
-func (s *Schema) SetValidations(val SchemaValidations) { - s.Maximum = val.Maximum - s.ExclusiveMaximum = val.ExclusiveMaximum - s.Minimum = val.Minimum - s.ExclusiveMinimum = val.ExclusiveMinimum - s.MaxLength = val.MaxLength - s.MinLength = val.MinLength - s.Pattern = val.Pattern - s.MaxItems = val.MaxItems - s.MinItems = val.MinItems - s.UniqueItems = val.UniqueItems - s.MultipleOf = val.MultipleOf - s.Enum = val.Enum - s.MinProperties = val.MinProperties - s.MaxProperties = val.MaxProperties - s.PatternProperties = val.PatternProperties -} - -// WithValidations is a fluent method to set schema validations -func (s *Schema) WithValidations(val SchemaValidations) *Schema { - s.SetValidations(val) - return s -} - -// Validations returns a clone of the validations for this schema -func (s Schema) Validations() SchemaValidations { - return SchemaValidations{ - CommonValidations: CommonValidations{ - Maximum: s.Maximum, - ExclusiveMaximum: s.ExclusiveMaximum, - Minimum: s.Minimum, - ExclusiveMinimum: s.ExclusiveMinimum, - MaxLength: s.MaxLength, - MinLength: s.MinLength, - Pattern: s.Pattern, - MaxItems: s.MaxItems, - MinItems: s.MinItems, - UniqueItems: s.UniqueItems, - MultipleOf: s.MultipleOf, - Enum: s.Enum, - }, - MinProperties: s.MinProperties, - MaxProperties: s.MaxProperties, - PatternProperties: s.PatternProperties, - } -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - props := struct { - SchemaProps - SwaggerSchemaProps - }{} - if err := json.Unmarshal(data, &props); err != nil { - return err - } - - sch := Schema{ - SchemaProps: props.SchemaProps, - SwaggerSchemaProps: props.SwaggerSchemaProps, - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - _ = sch.Ref.fromMap(d) - _ = sch.Schema.fromMap(d) - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go deleted file mode 100644 index 0059b99a..00000000 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strings" - - "github.com/go-openapi/swag" -) - -// PathLoader is a function to use when loading remote refs. -// -// This is a package level default. It may be overridden or bypassed by -// specifying the loader in ExpandOptions. -// -// NOTE: if you are using the go-openapi/loads package, it will override -// this value with its own default (a loader to retrieve YAML documents as -// well as JSON ones). -var PathLoader = func(pth string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(pth) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// resolverContext allows to share a context during spec processing. -// At the moment, it just holds the index of circular references found. -type resolverContext struct { - // circulars holds all visited circular references, to shortcircuit $ref resolution. - // - // This structure is privately instantiated and needs not be locked against - // concurrent access, unless we chose to implement a parallel spec walking. - circulars map[string]bool - basePath string - loadDoc func(string) (json.RawMessage, error) - rootID string -} - -func newResolverContext(options *ExpandOptions) *resolverContext { - expandOptions := optionsOrDefault(options) - - // path loader may be overridden by options - var loader func(string) (json.RawMessage, error) - if expandOptions.PathLoader == nil { - loader = PathLoader - } else { - loader = expandOptions.PathLoader - } - - return &resolverContext{ - circulars: make(map[string]bool), - basePath: expandOptions.RelativeBase, // keep the root base path in context - loadDoc: loader, - } -} - -type schemaLoader struct { - root interface{} - options *ExpandOptions - cache ResolutionCache - context *resolverContext -} - -func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { - if ref.IsRoot() || ref.HasFragmentOnly { - return r - } - - baseRef := MustCreateRef(basePath) - currentRef := normalizeRef(&ref, basePath) - if strings.HasPrefix(currentRef.String(), baseRef.String()) { - return r - } - - // set a new root against which to resolve - rootURL := currentRef.GetURL() - rootURL.Fragment = "" - root, _ := r.cache.Get(rootURL.String()) - - // shallow copy of resolver options to set a new RelativeBase when - // traversing multiple documents - newOptions := r.options - newOptions.RelativeBase = rootURL.String() - - return defaultSchemaLoader(root, newOptions, r.cache, r.context) -} - -func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { - if transitive != r { - if transitive.options != nil && transitive.options.RelativeBase != "" { - return normalizeBase(transitive.options.RelativeBase) - } - } - - return basePath -} - -func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return ErrResolveRefNeedsAPointer - } - - if ref.GetURL() == nil 
{ - return nil - } - - var ( - res interface{} - data interface{} - err error - ) - - // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means - // it is pointing somewhere in the root. - root := r.root - if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) - } - } - - if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { - data = root - } else { - baseRef := normalizeRef(ref, basePath) - data, _, _, err = r.load(baseRef.GetURL()) - if err != nil { - return err - } - } - - res = data - if ref.String() != "" { - res, _, err = ref.GetPointer().Get(data) - if err != nil { - return err - } - } - return swag.DynamicJSONToStruct(res, target) -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) - toFetch := *refURL - toFetch.Fragment = "" - - var err error - pth := toFetch.String() - normalized := normalizeBase(pth) - debugLog("loading doc from: %s", normalized) - - data, fromCache := r.cache.Get(normalized) - if fromCache { - return data, toFetch, fromCache, nil - } - - b, err := r.context.loadDoc(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - var doc interface{} - if err := json.Unmarshal(b, &doc); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(normalized, doc) - - return doc, toFetch, fromCache, nil -} - -// isCircular detects cycles in sequences of $ref. -// -// It relies on a private context (which needs not be locked). -func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { - normalizedRef := normalizeURI(ref.String(), basePath) - if _, ok := r.context.circulars[normalizedRef]; ok { - // circular $ref has been already detected in another explored cycle - foundCycle = true - return - } - foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased - if foundCycle { - r.context.circulars[normalizedRef] = true - } - return -} - -// Resolve resolves a reference against basePath and stores the result in target. -// -// Resolve is not in charge of following references: it only resolves ref by following its URL. -// -// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. -// -// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct -func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { - return r.resolveRef(ref, target, basePath) -} - -func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { - var ref *Ref - switch refable := input.(type) { - case *Schema: - ref = &refable.Ref - case *Parameter: - ref = &refable.Ref - case *Response: - ref = &refable.Ref - case *PathItem: - ref = &refable.Ref - default: - return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) - } - - curRef := ref.String() - if curRef == "" { - return nil - } - - normalizedRef := normalizeRef(ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ - return nil - } - - if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { - return err - } - - if ref.String() == "" || ref.String() == curRef { - // done with rereferencing - return nil - } - - parentRefs = append(parentRefs, normalizedRef.String()) - return r.deref(input, parentRefs, normalizedBasePath) -} - -func (r *schemaLoader) shouldStopOnError(err error) bool { - if err != nil && !r.options.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - -func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) { - debugLog("schema has ID: %s", id) - - // handling the case when id is a folder - // remember that basePath has to point to a file - var refPath string - if strings.HasSuffix(id, "/") { - // ensure this is detected as a file, not a folder - refPath = fmt.Sprintf("%s%s", id, "placeholder.json") - } else { - refPath = id - } - - // updates the current base path - // * important: ID can be a relative path - // * registers target to be fetchable from the new base proposed by this id - newBasePath := normalizeURI(refPath, basePath) - - // store found IDs for possible future reuse in $ref - r.cache.Set(newBasePath, target) - - // the root document has an ID: all $ref relative to that ID may - // be rebased relative to the root document - if basePath == r.context.basePath { - debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) - r.context.rootID = newBasePath - } - - return newBasePath, refPath -} - -func defaultSchemaLoader( - root interface{}, - expandOptions *ExpandOptions, - cache ResolutionCache, - context *resolverContext) *schemaLoader { - - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } - - cache = cacheOrDefault(cache) - - if expandOptions.RelativeBase == "" { - // if no relative base is provided, assume the root document - // contains all $ref, or at least, that the relative documents - // may be resolved from the current working directory. 
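-// (Note: baseForRoot is a helper defined elsewhere in this package; as used
-// here it derives a synthetic base path for the in-memory root document and
-// registers that document in the resolution cache, so fragment-only $ref
-// values can still be resolved when no RelativeBase was supplied.)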
- expandOptions.RelativeBase = baseForRoot(root, cache) - } - debugLog("effective expander options: %#v", expandOptions) - - if context == nil { - context = newResolverContext(expandOptions) - } - - return &schemaLoader{ - root: root, - options: expandOptions, - cache: cache, - context: context, - } -} diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json deleted file mode 100644 index bcbb8474..00000000 --- a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "$schema": { - "type": "string" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, 
- "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json deleted file mode 100644 index ebe10ed3..00000000 --- a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json +++ /dev/null @@ -1,1607 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. Example: '/api'." - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." 
- }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." - }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for responses" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." 
- }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - "type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": 
"#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - 
"prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": "#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - 
"additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - "uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - } - } - } - } -} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go deleted file mode 100644 index 9d0bdae9..00000000 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - basic = "basic" - apiKey = "apiKey" - oauth2 = "oauth2" - implicit = "implicit" - password = "password" - application = "application" - accessCode = "accessCode" -) - -// BasicAuth creates a basic auth security scheme -func BasicAuth() *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} -} - -// APIKeyAuth creates an api key auth security scheme -func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} -} - -// OAuth2Implicit creates an implicit flow oauth2 security scheme -func OAuth2Implicit(authorizationURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: implicit, - AuthorizationURL: authorizationURL, - }} -} - -// OAuth2Password creates a password flow oauth2 security scheme -func OAuth2Password(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: password, - TokenURL: tokenURL, - }} -} - -// OAuth2Application creates an application flow oauth2 security scheme -func OAuth2Application(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: application, - TokenURL: tokenURL, - }} -} - -// OAuth2AccessToken creates an access token flow oauth2 security scheme -func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: accessCode, - AuthorizationURL: authorizationURL, - TokenURL: tokenURL, - }} -} - -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// AddScope adds a scope to this security scheme -func (s *SecuritySchemeProps) AddScope(scope, description string) { - if s.Scopes == nil { - s.Scopes = make(map[string]string) - } - s.Scopes[scope] = description -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. -// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). 
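-//
-// A rough usage sketch with the constructors above (the URLs and scope name
-// are placeholders):
-//
-//	scheme := OAuth2AccessToken(
-//		"https://auth.example.com/authorize",
-//		"https://auth.example.com/token")
-//	scheme.AddScope("read", "grants read access")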
-// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - var ( - b1 []byte - err error - ) - - if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { - // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string - b1, err = json.Marshal(s.SecuritySchemeProps) - } else { - // when not oauth2, empty AuthorizationURL should be omitted - b1, err = json.Marshal(struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 - }{ - Description: s.Description, - Type: s.Type, - Name: s.Name, - In: s.In, - Flow: s.Flow, - AuthorizationURL: s.AuthorizationURL, - TokenURL: s.TokenURL, - Scopes: s.Scopes, - }) - } - if err != nil { - return nil, err - } - - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go deleted file mode 100644 index 876aa127..00000000 --- a/vendor/github.com/go-openapi/spec/spec.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" -) - -//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json -//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema -//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
-//go:generate perl -pi -e s,Json,JSON,g bindata.go - -const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs - SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema - JSONSchemaURL = "http://json-schema.org/draft-04/schema#" -) - -// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error -func MustLoadJSONSchemaDraft04() *Schema { - d, e := JSONSchemaDraft04() - if e != nil { - panic(e) - } - return d -} - -// JSONSchemaDraft04 loads the json schema document for json schema draft04 -func JSONSchemaDraft04() (*Schema, error) { - b, err := jsonschemaDraft04JSONBytes() - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} - -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error -func MustLoadSwagger20Schema() *Schema { - d, e := Swagger20Schema() - if e != nil { - panic(e) - } - return d -} - -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets -func Swagger20Schema() (*Schema, error) { - - b, err := v2SchemaJSONBytes() - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go deleted file mode 100644 index 1590fd17..00000000 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "strconv" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) -// together into one document.
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// JSONLookup look up a value by the json property name -func (s Swagger) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) - return r, err -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -// GobEncode provides a safe gob encoder for Swagger, including extensions -func (s Swagger) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw := struct { - Props SwaggerProps - Ext VendorExtensible - }{ - Props: s.SwaggerProps, - Ext: s.VendorExtensible, - } - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Swagger, including extensions -func (s *Swagger) GobDecode(b []byte) error { - var raw struct { - Props SwaggerProps - Ext VendorExtensible - } - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - s.SwaggerProps = raw.Props - s.VendorExtensible = raw.Ext - return nil -} - -// SwaggerProps captures the top-level properties of an Api specification -// -// NOTE: validation rules -// - the scheme, when present must be from [http, https, ws, wss] -// - BasePath must start with a leading "/" -// - Paths is required -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Paths *Paths `json:"paths"` - Definitions Definitions `json:"definitions,omitempty"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -type swaggerPropsAlias SwaggerProps - -type gobSwaggerPropsAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *swaggerPropsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements -func (o SwaggerProps) GobEncode() ([]byte, error) { - raw := gobSwaggerPropsAlias{ - Alias: (*swaggerPropsAlias)(&o), - } - - var b bytes.Buffer - if o.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(o.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) 
- return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(o.Security)) - for _, req := range o.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements -func (o *SwaggerProps) GobDecode(b []byte) error { - var raw gobSwaggerPropsAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) - } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *o = *(*SwaggerProps)(raw.Alias) - return nil -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { - if token == "allows" { - return s.Allows, nil - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) > 0 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !bytes.Equal(data, []byte("false")) - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - var nw SchemaOrStringArray - if first == '{' { - 
var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { - if _, err := strconv.Atoi(token); err == nil { - r, _, err := jsonpointer.GetForToken(s.Schemas, token) - return r, err - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch v := single.(type) { - case string: - *s = StringOrArray([]string{v}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} - -// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go deleted file mode 100644 index faa3d3de..00000000 --- a/vendor/github.com/go-openapi/spec/tag.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// TagProps describe a tag entry in the top level tags section of a swagger spec -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// NewTag creates a new tag -func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} -} - -// Tag allows adding meta data to a single tag that is used by the -// [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (t Tag) JSONLookup(token string) (interface{}, error) { - if ex, ok := t.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(t.TagProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go deleted file mode 100644 index 5bdfe40b..00000000 --- a/vendor/github.com/go-openapi/spec/url_go19.go +++ /dev/null @@ -1,11 +0,0 @@ -package spec - -import "net/url" - -func parseURL(s string) (*url.URL, error) { - u, err := url.Parse(s) - if err == nil { - u.OmitHost = false - } - return u, err -} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go deleted file mode 100644 index 6360a8ea..00000000 --- a/vendor/github.com/go-openapi/spec/validations.go +++ /dev/null @@ -1,215 +0,0 @@ -package spec - -// CommonValidations describe common JSON-schema validations -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// SetValidations defines all validations for a simple schema. -// -// NOTE: the input is the larger set of validations available for schemas. -// For simple schemas, MinProperties and MaxProperties are ignored. -func (v *CommonValidations) SetValidations(val SchemaValidations) { - v.Maximum = val.Maximum - v.ExclusiveMaximum = val.ExclusiveMaximum - v.Minimum = val.Minimum - v.ExclusiveMinimum = val.ExclusiveMinimum - v.MaxLength = val.MaxLength - v.MinLength = val.MinLength - v.Pattern = val.Pattern - v.MaxItems = val.MaxItems - v.MinItems = val.MinItems - v.UniqueItems = val.UniqueItems - v.MultipleOf = val.MultipleOf - v.Enum = val.Enum -} - -type clearedValidation struct { - Validation string - Value interface{} -} - -type clearedValidations []clearedValidation - -func (c clearedValidations) apply(cbs []func(string, interface{})) { - for _, cb := range cbs { - for _, cleared := range c { - cb(cleared.Validation, cleared.Value) - } - } -} - -// ClearNumberValidations clears all number validations. -// -// Some callbacks may be set by the caller to capture changed values. 
-func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 5) - defer func() { - done.apply(cbs) - }() - - if v.Minimum != nil { - done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) - v.Minimum = nil - } - if v.Maximum != nil { - done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) - v.Maximum = nil - } - if v.ExclusiveMaximum { - done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) - v.ExclusiveMaximum = false - } - if v.ExclusiveMinimum { - done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) - v.ExclusiveMinimum = false - } - if v.MultipleOf != nil { - done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) - v.MultipleOf = nil - } -} - -// ClearStringValidations clears all string validations. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.Pattern != "" { - done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) - v.Pattern = "" - } - if v.MinLength != nil { - done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) - v.MinLength = nil - } - if v.MaxLength != nil { - done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) - v.MaxLength = nil - } -} - -// ClearArrayValidations clears all array validations. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.MaxItems != nil { - done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) - v.MaxItems = nil - } - if v.MinItems != nil { - done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) - v.MinItems = nil - } - if v.UniqueItems { - done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) - v.UniqueItems = false - } -} - -// Validations returns a clone of the validations for a simple schema. -// -// NOTE: in the context of simple schema objects, MinProperties, MaxProperties -// and PatternProperties remain unset. 
-func (v CommonValidations) Validations() SchemaValidations { - return SchemaValidations{ - CommonValidations: v, - } -} - -// HasNumberValidations indicates if the validations are for numbers or integers -func (v CommonValidations) HasNumberValidations() bool { - return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil -} - -// HasStringValidations indicates if the validations are for strings -func (v CommonValidations) HasStringValidations() bool { - return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" -} - -// HasArrayValidations indicates if the validations are for arrays -func (v CommonValidations) HasArrayValidations() bool { - return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems -} - -// HasEnum indicates if the validation includes some enum constraint -func (v CommonValidations) HasEnum() bool { - return len(v.Enum) > 0 -} - -// SchemaValidations describes the validation properties of a schema -// -// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change -// in the exported members: all initializers using literals would fail. -type SchemaValidations struct { - CommonValidations - - PatternProperties SchemaProperties `json:"patternProperties,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` -} - -// HasObjectValidations indicates if the validations are for objects -func (v SchemaValidations) HasObjectValidations() bool { - return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil -} - -// SetValidations for schema validations -func (v *SchemaValidations) SetValidations(val SchemaValidations) { - v.CommonValidations.SetValidations(val) - v.PatternProperties = val.PatternProperties - v.MaxProperties = val.MaxProperties - v.MinProperties = val.MinProperties -} - -// Validations for a schema -func (v SchemaValidations) Validations() SchemaValidations { - val := v.CommonValidations.Validations() - val.PatternProperties = v.PatternProperties - val.MinProperties = v.MinProperties - val.MaxProperties = v.MaxProperties - return val -} - -// ClearObjectValidations clears all object validations. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.MaxProperties != nil { - done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties}) - v.MaxProperties = nil - } - if v.MinProperties != nil { - done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties}) - v.MinProperties = nil - } - if v.PatternProperties != nil { - done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties}) - v.PatternProperties = nil - } -} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go deleted file mode 100644 index 945a4670..00000000 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// XMLObject a metadata object that allows for more fine-tuned XML model definitions. -// -// For more information: http://goo.gl/8us55a#xmlObject -type XMLObject struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Prefix string `json:"prefix,omitempty"` - Attribute bool `json:"attribute,omitempty"` - Wrapped bool `json:"wrapped,omitempty"` -} - -// WithName sets the xml name for the object -func (x *XMLObject) WithName(name string) *XMLObject { - x.Name = name - return x -} - -// WithNamespace sets the xml namespace for the object -func (x *XMLObject) WithNamespace(namespace string) *XMLObject { - x.Namespace = namespace - return x -} - -// WithPrefix sets the xml prefix for the object -func (x *XMLObject) WithPrefix(prefix string) *XMLObject { - x.Prefix = prefix - return x -} - -// AsAttribute flags this object as xml attribute -func (x *XMLObject) AsAttribute() *XMLObject { - x.Attribute = true - return x -} - -// AsElement flags this object as an xml node -func (x *XMLObject) AsElement() *XMLObject { - x.Attribute = false - return x -} - -// AsWrapped flags this object as wrapped, this is mostly useful for array types -func (x *XMLObject) AsWrapped() *XMLObject { - x.Wrapped = true - return x -} - -// AsUnwrapped flags this object as an xml node -func (x *XMLObject) AsUnwrapped() *XMLObject { - x.Wrapped = false - return x -} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig deleted file mode 100644 index 3152da69..00000000 --- a/vendor/github.com/go-openapi/strfmt/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes deleted file mode 100644 index d020be8e..00000000 --- a/vendor/github.com/go-openapi/strfmt/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go text eol=lf - diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore deleted file mode 100644 index dd91ed6a..00000000 --- a/vendor/github.com/go-openapi/strfmt/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml deleted file mode 100644 index 22f8d21c..00000000 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ /dev/null @@ -1,61 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - 
min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 3 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort - - exhaustruct - - varnamelen - - gci - - depguard - - errchkjson - - inamedparam - - nonamedreturns - - musttag - - ireturn - - forcetypeassert - - cyclop - # deprecated linters - - deadcode - - interfacer - - scopelint - - varcheck - - structcheck - - golint - - nosnakecase diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065..00000000 --- a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/go-openapi/strfmt/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md deleted file mode 100644 index f6b39c6c..00000000 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) - -This package exposes a registry of data types to support string formats in the go-openapi toolkit. - -strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. - -## Supported data formats -go-openapi/strfmt follows the swagger 2.0 specification with the following formats -defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). - -It also provides convenient extensions to go-openapi users. - -- [x] JSON-schema draft 4 formats - - date-time - - email - - hostname - - ipv4 - - ipv6 - - uri -- [x] swagger 2.0 format extensions - - binary - - byte (e.g. base64 encoded string) - - date (e.g. "1970-01-01") - - password -- [x] go-openapi custom format extensions - - bsonobjectid (BSON objectID) - - creditcard - - duration (e.g. "3 weeks", "1ms") - - hexcolor (e.g. "#FFFFFF") - - isbn, isbn10, isbn13 - - mac (e.g "01:02:03:04:05:06") - - rgbcolor (e.g. "rgb(100,100,100)") - - ssn - - uuid, uuid3, uuid4, uuid5 - - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") - - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec)) - -> NOTE: as the name stands for, this package is intended to support string formatting only. 
-> It does not provide validation for numerical values with swagger format extension for JSON types "number" or -> "integer" (e.g. float, double, int32...). - -## Type conversion - -All types defined here are stringers and may be converted to strings with `.String()`. -Note that most types defined by this package may be converted directly to string like `string(Email{})`. - -`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`. -Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})` - -## Using pointers - -The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does -with primitive types. - -## Format types -Types defined in strfmt expose marshaling and validation capabilities. - -List of defined types: -- Base64 -- CreditCard -- Date -- DateTime -- Duration -- Email -- HexColor -- Hostname -- IPv4 -- IPv6 -- CIDR -- ISBN -- ISBN10 -- ISBN13 -- MAC -- ObjectId -- Password -- RGBColor -- SSN -- URI -- UUID -- UUID3 -- UUID4 -- UUID5 -- [ULID](https://github.com/ulid/spec) diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go deleted file mode 100644 index cfa9a526..00000000 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package strfmt - -import ( - "database/sql/driver" - "fmt" - - "go.mongodb.org/mongo-driver/bson" - - "go.mongodb.org/mongo-driver/bson/bsontype" - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" -) - -func init() { - var id ObjectId - // register this format in the default registry - Default.Add("bsonobjectid", &id, IsBSONObjectID) -} - -// IsBSONObjectID returns true when the string is a valid BSON.ObjectId -func IsBSONObjectID(str string) bool { - _, err := bsonprim.ObjectIDFromHex(str) - return err == nil -} - -// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) -// -// swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck - -// NewObjectId creates an ObjectId from a hex string -func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck - oid, err := bsonprim.ObjectIDFromHex(hex) - if err != nil { - panic(err) - } - return ObjectId(oid) -} - -// MarshalText turns this instance into text -func (id ObjectId) MarshalText() ([]byte, error) { - oid := bsonprim.ObjectID(id) - if oid == bsonprim.NilObjectID { - return nil, nil - } - return []byte(oid.Hex()), nil -} - -// UnmarshalText hydrates this instance from text -func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on - if len(data) == 0 { - *id = ObjectId(bsonprim.NilObjectID) - return nil - } - oidstr := string(data) - oid, err := bsonprim.ObjectIDFromHex(oidstr) - if err != nil { - return err - } - *id = ObjectId(oid) - return nil -} - -// Scan reads a value from a database driver -func (id *ObjectId) Scan(raw interface{}) error { - var data []byte - switch v := raw.(type) { - case []byte: - data = v - case string: - data = []byte(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ObjectId from: %#v", v) - } - - return id.UnmarshalText(data) -} - -// Value converts a value to a database driver value -func (id ObjectId) Value() (driver.Value, error) { - return driver.Value(bsonprim.ObjectID(id).Hex()), nil -} - -func (id ObjectId) String() string { - return bsonprim.ObjectID(id).Hex() -} - -// MarshalJSON returns the ObjectId as JSON -func (id ObjectId) MarshalJSON() ([]byte, error) { - return bsonprim.ObjectID(id).MarshalJSON() -} - -// UnmarshalJSON sets the ObjectId from JSON -func (id *ObjectId) UnmarshalJSON(data []byte) error { - var obj bsonprim.ObjectID - if err := obj.UnmarshalJSON(data); err != nil { - return err - } - *id = ObjectId(obj) - return nil -} - -// MarshalBSON renders the object id as a BSON document -func (id ObjectId) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) -} - -// UnmarshalBSON reads the objectId from a BSON document -func (id *ObjectId) UnmarshalBSON(data []byte) error { - var obj struct { - Data bsonprim.ObjectID - } - if err := bson.Unmarshal(data, &obj); err != nil { - return err - } - *id = ObjectId(obj.Data) - return nil -} - -// MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { - oid := bsonprim.ObjectID(id) - return bson.TypeObjectID, oid[:], nil -} - -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid.
UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error { - var oid bsonprim.ObjectID - copy(oid[:], data) - *id = ObjectId(oid) - return nil -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (id *ObjectId) DeepCopyInto(out *ObjectId) { - *out = *id -} - -// DeepCopy copies the receiver into a new ObjectId. -func (id *ObjectId) DeepCopy() *ObjectId { - if id == nil { - return nil - } - out := new(ObjectId) - id.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go deleted file mode 100644 index 3c93381c..00000000 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "time" - - "go.mongodb.org/mongo-driver/bson" -) - -func init() { - d := Date{} - // register this format in the default registry - Default.Add("date", &d, IsDate) -} - -// IsDate returns true when the string is a valid date -func IsDate(str string) bool { - _, err := time.Parse(RFC3339FullDate, str) - return err == nil -} - -const ( - // RFC3339FullDate represents a full-date as specified by RFC3339 - // See: http://goo.gl/xXOvVd - RFC3339FullDate = "2006-01-02" -) - -// Date represents a date from the API - // - // swagger:strfmt date -type Date time.Time - -// String converts this date into a string -func (d Date) String() string { - return time.Time(d).Format(RFC3339FullDate) -} - -// UnmarshalText parses a text representation into a date type -func (d *Date) UnmarshalText(text []byte) error { - if len(text) == 0 { - return nil - } - dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation) - if err != nil { - return err - } - *d = Date(dd) - return nil -} - -// MarshalText serializes this date type to string -func (d Date) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// Scan scans a Date value from database driver type. -func (d *Date) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - return d.UnmarshalText(v) - case string: - return d.UnmarshalText([]byte(v)) - case time.Time: - *d = Date(v) - return nil - case nil: - *d = Date{} - return nil - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v) - } -} - -// Value converts Date to a primitive value ready to be written to a database.
-func (d Date) Value() (driver.Value, error) { - return driver.Value(d.String()), nil -} - -// MarshalJSON returns the Date as JSON -func (d Date) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Time(d).Format(RFC3339FullDate)) -} - -// UnmarshalJSON sets the Date from JSON -func (d *Date) UnmarshalJSON(data []byte) error { - if string(data) == jsonNull { - return nil - } - var strdate string - if err := json.Unmarshal(data, &strdate); err != nil { - return err - } - tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation) - if err != nil { - return err - } - *d = Date(tt) - return nil -} - -func (d Date) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) -} - -func (d *Date) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { - return err - } - - if data, ok := m["data"].(string); ok { - rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) - if err != nil { - return err - } - *d = Date(rd) - return nil - } - - return errors.New("couldn't unmarshal bson bytes value as Date") -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (d *Date) DeepCopyInto(out *Date) { - *out = *d -} - -// DeepCopy copies the receiver into a new Date. -func (d *Date) DeepCopy() *Date { - if d == nil { - return nil - } - out := new(Date) - d.DeepCopyInto(out) - return out -} - -// GobEncode implements the gob.GobEncoder interface. -func (d Date) GobEncode() ([]byte, error) { - return d.MarshalBinary() -} - -// GobDecode implements the gob.GobDecoder interface. -func (d *Date) GobDecode(data []byte) error { - return d.UnmarshalBinary(data) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d Date) MarshalBinary() ([]byte, error) { - return time.Time(d).MarshalBinary() -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Date) UnmarshalBinary(data []byte) error { - var original time.Time - - err := original.UnmarshalBinary(data) - if err != nil { - return err - } - - *d = Date(original) - - return nil -} - -// Equal checks if two Date instances are equal -func (d Date) Equal(d2 Date) bool { - return time.Time(d).Equal(time.Time(d2)) -} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go deleted file mode 100644 index 28137140..00000000 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ /dev/null @@ -1,2051 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
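The two strfmt files deleted above implement text, JSON, BSON, and SQL round-trips for the bsonobjectid and date formats. A minimal sketch of how callers exercise those helpers, assuming the github.com/go-openapi/strfmt API as it was vendored before this bump:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Date round-trips through the RFC3339 full-date layout ("2006-01-02").
	var d strfmt.Date
	if err := d.UnmarshalText([]byte("2024-05-01")); err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // 2024-05-01

	// IsBSONObjectID reports whether a string is a valid 24-hex-digit ObjectId.
	fmt.Println(strfmt.IsBSONObjectID("507f1f77bcf86cd799439011")) // true
	fmt.Println(strfmt.IsBSONObjectID("not-an-object-id"))         // false
}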
- -package strfmt - -import ( - "database/sql/driver" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/mail" - "regexp" - "strings" - - "github.com/asaskevich/govalidator" - "github.com/google/uuid" - "go.mongodb.org/mongo-driver/bson" -) - -const ( - // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114 - // A string instance is valid against this attribute if it is a valid - // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // http://tools.ietf.org/html/rfc1034#section-3.5 - // <digit> ::= any one of the ten digits 0 through 9 - // var digit = /[0-9]/; - // <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case - // var letter = /[a-zA-Z]/; - // <let-dig> ::= <letter> | <digit> - // var letDig = /[0-9a-zA-Z]/; - // <let-dig-hyp> ::= <let-dig> | "-" - // var letDigHyp = /[-0-9a-zA-Z]/; - // <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str> - // var ldhStr = /[-0-9a-zA-Z]+/; - //
. */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -112258,6 +112815,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( if( aiMap ) aiMap[i] = j; } + assert( nExpr>0 && nExprmSubrtnSig & (1<<(pNewSig->selId&7)))==0 ) return 0; + assert( pExpr->op==TK_IN ); + assert( !ExprUseYSub(pExpr) ); + assert( ExprUseXSelect(pExpr) ); + assert( pExpr->x.pSelect!=0 ); + assert( (pExpr->x.pSelect->selFlags & SF_All)==0 ); + v = pParse->pVdbe; + assert( v!=0 ); + pOp = sqlite3VdbeGetOp(v, 1); + pEnd = sqlite3VdbeGetLastOp(v); + for(; pOp<pEnd; pOp++){ + if( pOp->p4type!=P4_SUBRTNSIG ) continue; + assert( pOp->opcode==OP_BeginSubrtn ); + pSig = pOp->p4.pSubrtnSig; + assert( pSig!=0 ); + if( !pSig->bComplete ) continue; + if( pNewSig->selId!=pSig->selId ) continue; + if( strcmp(pNewSig->zAff,pSig->zAff)!=0 ) continue; + pExpr->y.sub.iAddr = pSig->iAddr; + pExpr->y.sub.regReturn = pSig->regReturn; + pExpr->iTable = pSig->iTable; + ExprSetProperty(pExpr, EP_Subrtn); + return 1; + } + return 0; +} +#endif /* SQLITE_OMIT_SUBQUERY */ + #ifndef SQLITE_OMIT_SUBQUERY /* ** Generate code that will construct an ephemeral table containing all terms @@ -112440,6 +113042,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( KeyInfo *pKeyInfo = 0; /* Key information */ int nVal; /* Size of vector pLeft */ Vdbe *v; /* The prepared statement under construction */ + SubrtnSig *pSig = 0; /* Signature for this subroutine */ v = pParse->pVdbe; assert( v!=0 ); @@ -112455,11 +113058,27 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( ** and reuse it many times. */ if( !ExprHasProperty(pExpr, EP_VarSelect) && pParse->iSelfTab==0 ){ - /* Reuse of the RHS is allowed */ - /* If this routine has already been coded, but the previous code - ** might not have been invoked yet, so invoke it now as a subroutine. + /* Reuse of the RHS is allowed + ** + ** Compute a signature for the RHS of the IN operator to facilitate + ** finding and reusing prior instances of the same IN operator. */ - if( ExprHasProperty(pExpr, EP_Subrtn) ){ + assert( !ExprUseXSelect(pExpr) || pExpr->x.pSelect!=0 ); + if( ExprUseXSelect(pExpr) && (pExpr->x.pSelect->selFlags & SF_All)==0 ){ + pSig = sqlite3DbMallocRawNN(pParse->db, sizeof(pSig[0])); + if( pSig ){ + pSig->selId = pExpr->x.pSelect->selId; + pSig->zAff = exprINAffinity(pParse, pExpr); + } + } + + /* Check to see if there is a prior materialization of the RHS of + ** this IN operator. If there is, then make use of that prior + ** materialization rather than recomputing it.
+ */ + if( ExprHasProperty(pExpr, EP_Subrtn) + || findCompatibleInRhsSubrtn(pParse, pExpr, pSig) + ){ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); if( ExprUseXSelect(pExpr) ){ ExplainQueryPlan((pParse, 0, "REUSE LIST SUBQUERY %d", @@ -112471,6 +113090,10 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( assert( iTab!=pExpr->iTable ); sqlite3VdbeAddOp2(v, OP_OpenDup, iTab, pExpr->iTable); sqlite3VdbeJumpHere(v, addrOnce); + if( pSig ){ + sqlite3DbFree(pParse->db, pSig->zAff); + sqlite3DbFree(pParse->db, pSig); + } return; } @@ -112481,7 +113104,14 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( pExpr->y.sub.regReturn = ++pParse->nMem; pExpr->y.sub.iAddr = sqlite3VdbeAddOp2(v, OP_BeginSubrtn, 0, pExpr->y.sub.regReturn) + 1; - + if( pSig ){ + pSig->bComplete = 0; + pSig->iAddr = pExpr->y.sub.iAddr; + pSig->regReturn = pExpr->y.sub.regReturn; + pSig->iTable = iTab; + pParse->mSubrtnSig = 1 << (pSig->selId&7); + sqlite3VdbeChangeP4(v, -1, (const char*)pSig, P4_SUBRTNSIG); + } addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); } @@ -112522,15 +113152,30 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( SelectDest dest; int i; int rc; + int addrBloom = 0; sqlite3SelectDestInit(&dest, SRT_Set, iTab); dest.zAffSdst = exprINAffinity(pParse, pExpr); pSelect->iLimit = 0; + if( addrOnce && OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ){ + int regBloom = ++pParse->nMem; + addrBloom = sqlite3VdbeAddOp2(v, OP_Blob, 10000, regBloom); + VdbeComment((v, "Bloom filter")); + dest.iSDParm2 = regBloom; + } testcase( pSelect->selFlags & SF_Distinct ); testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */ pCopy = sqlite3SelectDup(pParse->db, pSelect, 0); rc = pParse->db->mallocFailed ? 1 :sqlite3Select(pParse, pCopy, &dest); sqlite3SelectDelete(pParse->db, pCopy); sqlite3DbFree(pParse->db, dest.zAffSdst); + if( addrBloom ){ + sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; + if( dest.iSDParm2==0 ){ + sqlite3VdbeChangeToNoop(v, addrBloom); + }else{ + sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; + } + } if( rc ){ sqlite3KeyInfoUnref(pKeyInfo); return; @@ -112596,6 +113241,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( sqlite3ReleaseTempReg(pParse, r1); sqlite3ReleaseTempReg(pParse, r2); } + if( pSig ) pSig->bComplete = 1; if( pKeyInfo ){ sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO); } @@ -112828,9 +113474,7 @@ static void sqlite3ExprCodeIN( if( sqlite3ExprCheckIN(pParse, pExpr) ) return; zAff = exprINAffinity(pParse, pExpr); nVector = sqlite3ExprVectorSize(pExpr->pLeft); - aiMap = (int*)sqlite3DbMallocZero( - pParse->db, nVector*(sizeof(int) + sizeof(char)) + 1 - ); + aiMap = (int*)sqlite3DbMallocZero(pParse->db, nVector*sizeof(int)); if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error; /* Attempt to compute the RHS. 
After this step, if anything other than @@ -112973,6 +113617,15 @@ static void sqlite3ExprCodeIN( sqlite3VdbeAddOp4(v, OP_Affinity, rLhs, nVector, 0, zAff, nVector); if( destIfFalse==destIfNull ){ /* Combine Step 3 and Step 5 into a single opcode */ + if( ExprHasProperty(pExpr, EP_Subrtn) ){ + const VdbeOp *pOp = sqlite3VdbeGetOp(v, pExpr->y.sub.iAddr); + assert( pOp->opcode==OP_Once || pParse->nErr ); + if( pOp->opcode==OP_Once && pOp->p3>0 ){ + assert( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ); + sqlite3VdbeAddOp4Int(v, OP_Filter, pOp->p3, destIfFalse, + rLhs, nVector); VdbeCoverage(v); + } + } sqlite3VdbeAddOp4Int(v, OP_NotFound, iTab, destIfFalse, rLhs, nVector); VdbeCoverage(v); goto sqlite3ExprCodeIN_finished; @@ -113255,13 +113908,17 @@ SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int n ** register iReg. The caller must ensure that iReg already contains ** the correct value for the expression. */ -static void exprToRegister(Expr *pExpr, int iReg){ +SQLITE_PRIVATE void sqlite3ExprToRegister(Expr *pExpr, int iReg){ Expr *p = sqlite3ExprSkipCollateAndLikely(pExpr); if( NEVER(p==0) ) return; - p->op2 = p->op; - p->op = TK_REGISTER; - p->iTable = iReg; - ExprClearProperty(p, EP_Skip); + if( p->op==TK_REGISTER ){ + assert( p->iTable==iReg ); + }else{ + p->op2 = p->op; + p->op = TK_REGISTER; + p->iTable = iReg; + ExprClearProperty(p, EP_Skip); + } } /* @@ -113431,6 +114088,59 @@ static int exprCodeInlineFunction( return target; } +/* +** Expression Node callback for sqlite3ExprCanReturnSubtype(). +** +** Only a function call is able to return a subtype. So if the node +** is not a function call, return WRC_Prune immediately. +** +** A function call is able to return a subtype if it has the +** SQLITE_RESULT_SUBTYPE property. +** +** Assume that every function is able to pass-through a subtype from +** one of its argument (using sqlite3_result_value()). Most functions +** are not this way, but we don't have a mechanism to distinguish those +** that are from those that are not, so assume they all work this way. +** That means that if one of its arguments is another function and that +** other function is able to return a subtype, then this function is +** able to return a subtype. +*/ +static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){ + int n; + FuncDef *pDef; + sqlite3 *db; + if( pExpr->op!=TK_FUNCTION ){ + return WRC_Prune; + } + assert( ExprUseXList(pExpr) ); + db = pWalker->pParse->db; + n = ALWAYS(pExpr->x.pList) ? pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( NEVER(pDef==0) || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + pWalker->eCode = 1; + return WRC_Prune; + } + return WRC_Continue; +} + +/* +** Return TRUE if expression pExpr is able to return a subtype. +** +** A TRUE return does not guarantee that a subtype will be returned. +** It only indicates that a subtype return is possible. False positives +** are acceptable as they only disable an optimization. False negatives, +** on the other hand, can lead to incorrect answers. +*/ +static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){ + Walker w; + memset(&w, 0, sizeof(w)); + w.pParse = pParse; + w.xExprCallback = exprNodeCanReturnSubtype; + sqlite3WalkExpr(&w, pExpr); + return w.eCode; +} + + /* ** Check to see if pExpr is one of the indexed expressions on pParse->pIdxEpr. 
** If it is, then resolve the expression by reading from the index and @@ -113463,6 +114173,17 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup( continue; } + + /* Functions that might set a subtype should not be replaced by the + ** value taken from an expression index if they are themselves an + ** argument to another scalar function or aggregate. + ** https://sqlite.org/forum/forumpost/68d284c86b082c3e */ + if( ExprHasProperty(pExpr, EP_SubtArg) + && sqlite3ExprCanReturnSubtype(pParse, pExpr) + ){ + continue; + } + v = pParse->pVdbe; assert( v!=0 ); if( p->bMaybeNullRow ){ @@ -114264,7 +114985,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) break; } testcase( pX->op==TK_COLUMN ); - exprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); + sqlite3ExprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); testcase( regFree1==0 ); memset(&opCompare, 0, sizeof(opCompare)); opCompare.op = TK_EQ; @@ -114318,15 +115039,14 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } assert( !ExprHasProperty(pExpr, EP_IntValue) ); if( pExpr->affExpr==OE_Ignore ){ - sqlite3VdbeAddOp4( - v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0); + sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, OE_Ignore); VdbeCoverage(v); }else{ - sqlite3HaltConstraint(pParse, + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeAddOp3(v, OP_Halt, pParse->pTriggerTab ? SQLITE_CONSTRAINT_TRIGGER : SQLITE_ERROR, - pExpr->affExpr, pExpr->u.zToken, 0, 0); + pExpr->affExpr, r1); } - break; } #endif @@ -114615,7 +115335,7 @@ static void exprCodeBetween( compRight.op = TK_LE; compRight.pLeft = pDel; compRight.pRight = pExpr->x.pList->a[1].pExpr; - exprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); + sqlite3ExprToRegister(pDel, exprCodeVector(pParse, pDel, ®Free1)); if( xJump ){ xJump(pParse, &exprAnd, dest, jumpIfNull); }else{ @@ -114994,16 +115714,23 @@ SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,i ** same as that currently bound to variable pVar, non-zero is returned. ** Otherwise, if the values are not the same or if pExpr is not a simple ** SQL value, zero is returned. +** +** If the SQLITE_EnableQPSG flag is set on the database connection, then +** this routine always returns false. */ -static int exprCompareVariable( +static SQLITE_NOINLINE int exprCompareVariable( const Parse *pParse, const Expr *pVar, const Expr *pExpr ){ - int res = 0; + int res = 2; int iVar; sqlite3_value *pL, *pR = 0; + if( pExpr->op==TK_VARIABLE && pVar->iColumn==pExpr->iColumn ){ + return 0; + } + if( (pParse->db->flags & SQLITE_EnableQPSG)!=0 ) return 2; sqlite3ValueFromExpr(pParse->db, pExpr, SQLITE_UTF8, SQLITE_AFF_BLOB, &pR); if( pR ){ iVar = pVar->iColumn; @@ -115013,12 +115740,11 @@ static int exprCompareVariable( if( sqlite3_value_type(pL)==SQLITE_TEXT ){ sqlite3_value_text(pL); /* Make sure the encoding is UTF-8 */ } - res = 0==sqlite3MemCompare(pL, pR, 0); + res = sqlite3MemCompare(pL, pR, 0) ? 2 : 0; } sqlite3ValueFree(pR); sqlite3ValueFree(pL); } - return res; } @@ -115044,12 +115770,10 @@ static int exprCompareVariable( ** just might result in some slightly slower code. But returning ** an incorrect 0 or 1 could lead to a malfunction. ** -** If pParse is not NULL then TK_VARIABLE terms in pA with bindings in -** pParse->pReprepare can be matched against literals in pB. The -** pParse->pVdbe->expmask bitmask is updated for each variable referenced. 
-** If pParse is NULL (the normal case) then any TK_VARIABLE term in -** Argument pParse should normally be NULL. If it is not NULL and pA or -** pB causes a return value of 2. +** If pParse is not NULL and SQLITE_EnableQPSG is off then TK_VARIABLE +** terms in pA with bindings in pParse->pReprepare can be matched against +** literals in pB. The pParse->pVdbe->expmask bitmask is updated for +** each variable referenced. */ SQLITE_PRIVATE int sqlite3ExprCompare( const Parse *pParse, @@ -115061,8 +115785,8 @@ SQLITE_PRIVATE int sqlite3ExprCompare( if( pA==0 || pB==0 ){ return pB==pA ? 0 : 2; } - if( pParse && pA->op==TK_VARIABLE && exprCompareVariable(pParse, pA, pB) ){ - return 0; + if( pParse && pA->op==TK_VARIABLE ){ + return exprCompareVariable(pParse, pA, pB); } combinedFlags = pA->flags | pB->flags; if( combinedFlags & EP_IntValue ){ @@ -115257,18 +115981,70 @@ static int exprImpliesNotNull( return 0; } +/* +** Return true if the boolean value of the expression is always either +** FALSE or NULL. +*/ +static int sqlite3ExprIsNotTrue(Expr *pExpr){ + int v; + if( pExpr->op==TK_NULL ) return 1; + if( pExpr->op==TK_TRUEFALSE && sqlite3ExprTruthValue(pExpr)==0 ) return 1; + v = 1; + if( sqlite3ExprIsInteger(pExpr, &v, 0) && v==0 ) return 1; + return 0; +} + +/* +** Return true if the expression is one of the following: +** +** CASE WHEN x THEN y END +** CASE WHEN x THEN y ELSE NULL END +** CASE WHEN x THEN y ELSE false END +** iif(x,y) +** iif(x,y,NULL) +** iif(x,y,false) +*/ +static int sqlite3ExprIsIIF(sqlite3 *db, const Expr *pExpr){ + ExprList *pList; + if( pExpr->op==TK_FUNCTION ){ + const char *z = pExpr->u.zToken; + FuncDef *pDef; + if( (z[0]!='i' && z[0]!='I') ) return 0; + if( pExpr->x.pList==0 ) return 0; + pDef = sqlite3FindFunction(db, z, pExpr->x.pList->nExpr, ENC(db), 0); +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + if( pDef==0 ) return 0; +#else + if( NEVER(pDef==0) ) return 0; +#endif + if( (pDef->funcFlags & SQLITE_FUNC_INLINE)==0 ) return 0; + if( SQLITE_PTR_TO_INT(pDef->pUserData)!=INLINEFUNC_iif ) return 0; + }else if( pExpr->op==TK_CASE ){ + if( pExpr->pLeft!=0 ) return 0; + }else{ + return 0; + } + pList = pExpr->x.pList; + assert( pList!=0 ); + if( pList->nExpr==2 ) return 1; + if( pList->nExpr==3 && sqlite3ExprIsNotTrue(pList->a[2].pExpr) ) return 1; + return 0; +} + /* ** Return true if we can prove the pE2 will always be true if pE1 is ** true. Return false if we cannot complete the proof or if pE2 might ** be false. Examples: ** -** pE1: x==5 pE2: x==5 Result: true -** pE1: x>0 pE2: x==5 Result: false -** pE1: x=21 pE2: x=21 OR y=43 Result: true -** pE1: x!=123 pE2: x IS NOT NULL Result: true -** pE1: x!=?1 pE2: x IS NOT NULL Result: true -** pE1: x IS NULL pE2: x IS NOT NULL Result: false -** pE1: x IS ?2 pE2: x IS NOT NULL Result: false +** pE1: x==5 pE2: x==5 Result: true +** pE1: x>0 pE2: x==5 Result: false +** pE1: x=21 pE2: x=21 OR y=43 Result: true +** pE1: x!=123 pE2: x IS NOT NULL Result: true +** pE1: x!=?1 pE2: x IS NOT NULL Result: true +** pE1: x IS NULL pE2: x IS NOT NULL Result: false +** pE1: x IS ?2 pE2: x IS NOT NULL Result: false +** pE1: iif(x,y) pE2: x Result: true +** PE1: iif(x,y,0) pE2: x Result: true ** ** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has ** Expr.iTable<0 then assume a table number given by iTab. 
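The sqlite3ExprIsIIF()/sqlite3ExprImpliesExpr() change above teaches the planner that iif(x,y) can only be true when x is true. A small illustration using only the public SQLite API (the table, index, and query are invented for the example, and whether the partial index is actually chosen depends on the SQLite version linked in):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
    "CREATE TABLE t1(a,b);"
    "CREATE INDEX t1b ON t1(a) WHERE b>5;",  /* partial index on b>5 */
    0, 0, 0);
  /* Because iif(b>5, a=1, 0) implies b>5, the plan may now use t1b. */
  sqlite3_prepare_v2(db,
    "EXPLAIN QUERY PLAN SELECT a FROM t1 WHERE iif(b>5, a=1, 0);",
    -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 3));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}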
@@ -115302,6 +116078,9 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr( ){ return 1; } + if( sqlite3ExprIsIIF(pParse->db, pE1) ){ + return sqlite3ExprImpliesExpr(pParse,pE1->x.pList->a[0].pExpr,pE2,iTab); + } return 0; } @@ -117509,8 +118288,9 @@ static int renameResolveTrigger(Parse *pParse){ int i; for(i=0; i<pStep->pFrom->nSrc && rc==SQLITE_OK; i++){ SrcItem *p = &pStep->pFrom->a[i]; - if( p->pSelect ){ - sqlite3SelectPrep(pParse, p->pSelect, 0); + if( p->fg.isSubquery ){ + assert( p->u4.pSubq!=0 ); + sqlite3SelectPrep(pParse, p->u4.pSubq->pSelect, 0); } } } @@ -117578,8 +118358,12 @@ static void renameWalkTrigger(Walker *pWalker, Trigger *pTrigger){ } if( pStep->pFrom ){ int i; - for(i=0; i<pStep->pFrom->nSrc; i++){ - sqlite3WalkSelect(pWalker, pStep->pFrom->a[i].pSelect); + SrcList *pFrom = pStep->pFrom; + for(i=0; i<pFrom->nSrc; i++){ + if( pFrom->a[i].fg.isSubquery ){ + assert( pFrom->a[i].u4.pSubq!=0 ); + sqlite3WalkSelect(pWalker, pFrom->a[i].u4.pSubq->pSelect); + } } } } @@ -117826,7 +118610,7 @@ static int renameTableSelectCb(Walker *pWalker, Select *pSelect){ } for(i=0; i<pSrc->nSrc; i++){ SrcItem *pItem = &pSrc->a[i]; - if( pItem->pTab==p->pTab ){ + if( pItem->pSTab==p->pTab ){ renameTokenFind(pWalker->pParse, p, pItem->zName); } } @@ -120254,12 +121038,13 @@ static int loadStatTbl( while( sqlite3_step(pStmt)==SQLITE_ROW ){ int nIdxCol = 1; /* Number of columns in stat4 records */ - char *zIndex; /* Index name */ - Index *pIdx; /* Pointer to the index object */ - int nSample; /* Number of samples */ - int nByte; /* Bytes of space required */ - int i; /* Bytes of space required */ - tRowcnt *pSpace; + char *zIndex; /* Index name */ + Index *pIdx; /* Pointer to the index object */ + int nSample; /* Number of samples */ + i64 nByte; /* Bytes of space required */ + i64 i; /* Loop counter */ + tRowcnt *pSpace; /* Available allocated memory space */ + u8 *pPtr; /* Available memory as a u8 for easier manipulation */ zIndex = (char *)sqlite3_column_text(pStmt, 0); if( zIndex==0 ) continue; @@ -120279,7 +121064,7 @@ } pIdx->nSampleCol = nIdxCol; pIdx->mxSample = nSample; - nByte = sizeof(IndexSample) * nSample; + nByte = ROUND8(sizeof(IndexSample) * nSample); nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ @@ -120288,7 +121073,10 @@ sqlite3_finalize(pStmt); return SQLITE_NOMEM_BKPT; } - pSpace = (tRowcnt*)&pIdx->aSample[nSample]; + pPtr = (u8*)pIdx->aSample; + pPtr += ROUND8(nSample*sizeof(pIdx->aSample[0])); + pSpace = (tRowcnt*)pPtr; + assert( EIGHT_BYTE_ALIGNMENT( pSpace ) ); pIdx->aAvgEq = pSpace; pSpace += nIdxCol; pIdx->pTable->tabFlags |= TF_HasStat4; for(i=0; iflags & SQLITE_AttachWrite)==0 ){ + flags &= ~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE); + flags |= SQLITE_OPEN_READONLY; + }else if( (db->flags & SQLITE_AttachCreate)==0 ){ + flags &= ~SQLITE_OPEN_CREATE; + } assert( pVfs ); flags |= SQLITE_OPEN_MAIN_DB; rc = sqlite3BtreeOpen(pVfs, zPath, db, &pNew->pBt, 0, flags); @@ -120704,15 +121498,6 @@ static void attachFunc( sqlite3BtreeLeaveAll(db); assert( zErrDyn==0 || rc!=SQLITE_OK ); } -#ifdef SQLITE_USER_AUTHENTICATION - if( rc==SQLITE_OK && !REOPEN_AS_MEMDB(db) ){ - u8 newAuth = 0; - rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth); - if( newAuth<db->auth.authLevel ){ - rc = SQLITE_AUTH_USER; - } - } -#endif if( rc ){ if( ALWAYS(!REOPEN_AS_MEMDB(db)) ){ int iDb = db->nDb - 1; @@ -120956,20 +121741,21 @@ static int fixSelectCb(Walker *p, Select *pSelect){ if( NEVER(pList==0) ) return
WRC_Continue; for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ - if( pFix->bTemp==0 ){ - if( pItem->zDatabase ){ - if( iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){ + if( pFix->bTemp==0 && pItem->fg.isSubquery==0 ){ + if( pItem->fg.fixedSchema==0 && pItem->u4.zDatabase!=0 ){ + if( iDb!=sqlite3FindDbName(db, pItem->u4.zDatabase) ){ sqlite3ErrorMsg(pFix->pParse, "%s %T cannot reference objects in database %s", - pFix->zType, pFix->pName, pItem->zDatabase); + pFix->zType, pFix->pName, pItem->u4.zDatabase); return WRC_Abort; } - sqlite3DbFree(db, pItem->zDatabase); - pItem->zDatabase = 0; + sqlite3DbFree(db, pItem->u4.zDatabase); pItem->fg.notCte = 1; + pItem->fg.hadSchema = 1; } - pItem->pSchema = pFix->pSchema; + pItem->u4.pSchema = pFix->pSchema; pItem->fg.fromDDL = 1; + pItem->fg.fixedSchema = 1; } #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) if( pList->a[i].fg.isUsing==0 @@ -121209,11 +121995,7 @@ SQLITE_PRIVATE int sqlite3AuthReadCol( int rc; /* Auth callback return code */ if( db->init.busy ) return SQLITE_OK; - rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext -#ifdef SQLITE_USER_AUTHENTICATION - ,db->auth.zAuthUser -#endif - ); + rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext); if( rc==SQLITE_DENY ){ char *z = sqlite3_mprintf("%s.%s", zTab, zCol); if( db->nDb>2 || iDb!=0 ) z = sqlite3_mprintf("%s.%z", zDb, z); @@ -121262,7 +122044,7 @@ SQLITE_PRIVATE void sqlite3AuthRead( assert( pTabList ); for(iSrc=0; iSrc<pTabList->nSrc; iSrc++){ if( pExpr->iTable==pTabList->a[iSrc].iCursor ){ - pTab = pTabList->a[iSrc].pTab; + pTab = pTabList->a[iSrc].pSTab; break; } } @@ -121320,11 +122102,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck( testcase( zArg3==0 ); testcase( pParse->zAuthContext==0 ); - rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext -#ifdef SQLITE_USER_AUTHENTICATION - ,db->auth.zAuthUser -#endif - ); + rc = db->xAuth(db->pAuthArg,code,zArg1,zArg2,zArg3,pParse->zAuthContext); if( rc==SQLITE_DENY ){ sqlite3ErrorMsg(pParse, "not authorized"); pParse->rc = SQLITE_AUTH; @@ -121557,17 +122335,6 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } sqlite3VdbeAddOp0(v, OP_Halt); -#if SQLITE_USER_AUTHENTICATION && !defined(SQLITE_OMIT_SHARED_CACHE) - if( pParse->nTableLock>0 && db->init.busy==0 ){ - sqlite3UserAuthInit(db); - if( db->auth.authLevel<UAUTH_User ){ - sqlite3ErrorMsg(pParse, "user not authenticated"); - pParse->rc = SQLITE_AUTH_USER; - return; - } - } -#endif - /* The cookie mask contains one bit for each database file open. ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are ** set for each database that is used. Generate code to start a @@ -121696,16 +122463,6 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ pParse->nested--; } -#if SQLITE_USER_AUTHENTICATION -/* -** Return TRUE if zTable is the name of the system table that stores the -** list of users and their access credentials. -*/ -SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){ - return sqlite3_stricmp(zTable, "sqlite_user")==0; -} -#endif - /* ** Locate the in-memory structure that describes a particular database ** table given the name of that table and (optionally) the name of the @@ -121724,13 +122481,6 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha /* All mutexes are required for schema access. Make sure we hold them.
*/ assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) ); -#if SQLITE_USER_AUTHENTICATION - /* Only the admin user is allowed to know that the sqlite_user table - ** exists */ - if( db->auth.authLevel<UAUTH_Admin && sqlite3UserAuthTable(zName)!=0 ){ - return 0; - } -#endif if( zDatabase ){ for(i=0; i<db->nDb; i++){ if( sqlite3StrICmp(zDatabase, db->aDb[i].zDbSName)==0 ) break; } @@ -121865,12 +122615,12 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem( SrcItem *p ){ const char *zDb; - assert( p->pSchema==0 || p->zDatabase==0 ); - if( p->pSchema ){ - int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema); + if( p->fg.fixedSchema ){ + int iDb = sqlite3SchemaToIndex(pParse->db, p->u4.pSchema); zDb = pParse->db->aDb[iDb].zDbSName; }else{ - zDb = p->zDatabase; + assert( !p->fg.isSubquery ); + zDb = p->u4.zDatabase; } return sqlite3LocateTable(pParse, flags, p->zName, zDb); } @@ -124855,6 +125605,8 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, } assert( pParse->nErr==0 ); assert( pName->nSrc==1 ); + assert( pName->a[0].fg.fixedSchema==0 ); + assert( pName->a[0].fg.isSubquery==0 ); if( sqlite3ReadSchema(pParse) ) goto exit_drop_table; if( noErr ) db->suppressErr++; assert( isView==0 || isView==LOCATE_VIEW ); if( pTab==0 ){ if( noErr ){ - sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].u4.zDatabase); sqlite3ForceNotReadOnly(pParse); } goto exit_drop_table; } @@ -125387,9 +126139,6 @@ SQLITE_PRIVATE void sqlite3CreateIndex( if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 && db->init.busy==0 && pTblName!=0 -#if SQLITE_USER_AUTHENTICATION - && sqlite3UserAuthTable(pTab->zName)==0 -#endif ){ sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName); goto exit_create_index; @@ -125954,15 +126703,17 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists } assert( pParse->nErr==0 ); /* Never called with prior non-OOM errors */ assert( pName->nSrc==1 ); + assert( pName->a[0].fg.fixedSchema==0 ); + assert( pName->a[0].fg.isSubquery==0 ); if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto exit_drop_index; } - pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase); + pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].u4.zDatabase); if( pIndex==0 ){ if( !ifExists ){ sqlite3ErrorMsg(pParse, "no such index: %S", pName->a); }else{ - sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].u4.zDatabase); sqlite3ForceNotReadOnly(pParse); } pParse->checkSchema = 1; @@ -126086,7 +126837,6 @@ SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){ int i; assert( db!=0 ); if( pList==0 ) return; - assert( pList->eU4!=EU4_EXPR ); /* EU4_EXPR mode is not currently used */ for(i=0; i<pList->nId; i++){ sqlite3DbFree(db, pList->a[i].zName); } @@ -126259,12 +127009,14 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( if( pDatabase && pDatabase->z==0 ){ pDatabase = 0; } + assert( pItem->fg.fixedSchema==0 ); + assert( pItem->fg.isSubquery==0 ); if( pDatabase ){ pItem->zName = sqlite3NameFromToken(db, pDatabase); - pItem->zDatabase = sqlite3NameFromToken(db, pTable); + pItem->u4.zDatabase = sqlite3NameFromToken(db, pTable); }else{ pItem->zName = sqlite3NameFromToken(db, pTable); - pItem->zDatabase = 0; + pItem->u4.zDatabase = 0; } return pList; } @@ -126280,13 +127032,40 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ if(
pItem->iCursor>=0 ) continue; pItem->iCursor = pParse->nTab++; - if( pItem->pSelect ){ - sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc); + if( pItem->fg.isSubquery ){ + assert( pItem->u4.pSubq!=0 ); + assert( pItem->u4.pSubq->pSelect!=0 ); + assert( pItem->u4.pSubq->pSelect->pSrc!=0 ); + sqlite3SrcListAssignCursors(pParse, pItem->u4.pSubq->pSelect->pSrc); } } } } +/* +** Delete a Subquery object and its substructure. +*/ +SQLITE_PRIVATE void sqlite3SubqueryDelete(sqlite3 *db, Subquery *pSubq){ + assert( pSubq!=0 && pSubq->pSelect!=0 ); + sqlite3SelectDelete(db, pSubq->pSelect); + sqlite3DbFree(db, pSubq); +} + +/* +** Remove a Subquery from a SrcItem. Return the associated Select object. +** The returned Select becomes the responsibility of the caller. +*/ +SQLITE_PRIVATE Select *sqlite3SubqueryDetach(sqlite3 *db, SrcItem *pItem){ + Select *pSel; + assert( pItem!=0 ); + assert( pItem->fg.isSubquery ); + pSel = pItem->u4.pSubq->pSelect; + sqlite3DbFree(db, pItem->u4.pSubq); + pItem->u4.pSubq = 0; + pItem->fg.isSubquery = 0; + return pSel; +} + /* ** Delete an entire SrcList including all its substructure. */ @@ -126296,13 +127075,24 @@ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ assert( db!=0 ); if( pList==0 ) return; for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){ - if( pItem->zDatabase ) sqlite3DbNNFreeNN(db, pItem->zDatabase); + + /* Check invariants on SrcItem */ + assert( !pItem->fg.isIndexedBy || !pItem->fg.isTabFunc ); + assert( !pItem->fg.isCte || !pItem->fg.isIndexedBy ); + assert( !pItem->fg.fixedSchema || !pItem->fg.isSubquery ); + assert( !pItem->fg.isSubquery || (pItem->u4.pSubq!=0 && + pItem->u4.pSubq->pSelect!=0) ); + if( pItem->zName ) sqlite3DbNNFreeNN(db, pItem->zName); if( pItem->zAlias ) sqlite3DbNNFreeNN(db, pItem->zAlias); + if( pItem->fg.isSubquery ){ + sqlite3SubqueryDelete(db, pItem->u4.pSubq); + }else if( pItem->fg.fixedSchema==0 && pItem->u4.zDatabase!=0 ){ + sqlite3DbNNFreeNN(db, pItem->u4.zDatabase); + } if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy); if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg); - sqlite3DeleteTable(db, pItem->pTab); - if( pItem->pSelect ) sqlite3SelectDelete(db, pItem->pSelect); + sqlite3DeleteTable(db, pItem->pSTab); if( pItem->fg.isUsing ){ sqlite3IdListDelete(db, pItem->u3.pUsing); }else if( pItem->u3.pOn ){ @@ -126312,6 +127102,54 @@ sqlite3DbNNFreeNN(db, pList); } +/* +** Attach a Subquery object to pItem->u4.pSubq. Set the +** pSelect value but leave all the other values initialized +** to zero. +** +** A copy of the Select object is made if dupSelect is true, and the +** SrcItem takes responsibility for deleting the copy. If dupSelect is +** false, ownership of the Select passes to the SrcItem. Either way, +** the SrcItem will take responsibility for deleting the Select. +** +** When dupSelect is zero, that means the Select might get deleted right +** away if there is an OOM error. Beware. +** +** Return non-zero on success. Return zero on an OOM error. +*/ +SQLITE_PRIVATE int sqlite3SrcItemAttachSubquery( + Parse *pParse, /* Parsing context */ + SrcItem *pItem, /* Item to which the subquery is to be attached */ + Select *pSelect, /* The subquery SELECT.
Must be non-NULL */ + int dupSelect /* If true, attach a copy of pSelect, not pSelect itself.*/ +){ + Subquery *p; + assert( pSelect!=0 ); + assert( pItem->fg.isSubquery==0 ); + if( pItem->fg.fixedSchema ){ + pItem->u4.pSchema = 0; + pItem->fg.fixedSchema = 0; + }else if( pItem->u4.zDatabase!=0 ){ + sqlite3DbFree(pParse->db, pItem->u4.zDatabase); + pItem->u4.zDatabase = 0; + } + if( dupSelect ){ + pSelect = sqlite3SelectDup(pParse->db, pSelect, 0); + if( pSelect==0 ) return 0; + } + p = pItem->u4.pSubq = sqlite3DbMallocRawNN(pParse->db, sizeof(Subquery)); + if( p==0 ){ + sqlite3SelectDelete(pParse->db, pSelect); + return 0; + } + pItem->fg.isSubquery = 1; + p->pSelect = pSelect; + assert( offsetof(Subquery, pSelect)==0 ); + memset(((char*)p)+sizeof(p->pSelect), 0, sizeof(*p)-sizeof(p->pSelect)); + return 1; +} + + /* ** This routine is called by the parser to add a new term to the ** end of a growing FROM clause. The "p" parameter is the part of @@ -126361,10 +127199,12 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( if( pAlias->n ){ pItem->zAlias = sqlite3NameFromToken(db, pAlias); } + assert( pSubquery==0 || pDatabase==0 ); if( pSubquery ){ - pItem->pSelect = pSubquery; - if( pSubquery->selFlags & SF_NestedFrom ){ - pItem->fg.isNestedFrom = 1; + if( sqlite3SrcItemAttachSubquery(pParse, pItem, pSubquery, 0) ){ + if( pSubquery->selFlags & SF_NestedFrom ){ + pItem->fg.isNestedFrom = 1; + } } } assert( pOnUsing==0 || pOnUsing->pOn==0 || pOnUsing->pUsing==0 ); @@ -127377,12 +128217,18 @@ static int matchQuality( u8 enc /* Desired text encoding */ ){ int match; - assert( p->nArg>=-1 ); + assert( p->nArg>=(-4) && p->nArg!=(-2) ); + assert( nArg>=(-2) ); /* Wrong number of arguments means "no match" */ if( p->nArg!=nArg ){ - if( nArg==(-2) ) return (p->xSFunc==0) ? 0 : FUNC_PERFECT_MATCH; + if( nArg==(-2) ) return p->xSFunc==0 ? 0 : FUNC_PERFECT_MATCH; if( p->nArg>=0 ) return 0; + /* Special p->nArg values available to built-in functions only: + ** -3 1 or more arguments required + ** -4 2 or more arguments required + */ + if( p->nArg<(-2) && nArg<(-2-p->nArg) ) return 0; } /* Give a better score to a function with a specific number of arguments @@ -127642,8 +128488,8 @@ SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ ** ** The following fields are initialized appropriately in pSrc: ** -** pSrc->a[0].pTab Pointer to the Table object -** pSrc->a[0].pIndex Pointer to the INDEXED BY index, if there is one +** pSrc->a[0].pSTab Pointer to the Table object +** pSrc->a[0].u2.pIBIndex Pointer to the INDEXED BY index, if there is one ** */ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ SrcItem *pItem = pSrc->a; Table *pTab; assert( pItem && pSrc->nSrc>=1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); - if( pItem->pTab ) sqlite3DeleteTable(pParse->db, pItem->pTab); - pItem->pTab = pTab; + if( pItem->pSTab ) sqlite3DeleteTable(pParse->db, pItem->pSTab); + pItem->pSTab = pTab; pItem->fg.notCte = 1; if( pTab ){ pTab->nTabRef++; @@ -127693,6 +128539,7 @@ SQLITE_PRIVATE void sqlite3CodeChangeCount(Vdbe *v, int regCounter, const char * ** is for a top-level SQL statement.
*/ static int vtabIsReadOnly(Parse *pParse, Table *pTab){ + assert( IsVirtual(pTab) ); if( sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ){ return 1; } @@ -127774,7 +128621,8 @@ SQLITE_PRIVATE void sqlite3MaterializeView( if( pFrom ){ assert( pFrom->nSrc==1 ); pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName); - pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + assert( pFrom->a[0].fg.fixedSchema==0 && pFrom->a[0].fg.isSubquery==0 ); + pFrom->a[0].u4.zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); assert( pFrom->a[0].fg.isUsing==0 ); assert( pFrom->a[0].u3.pOn==0 ); } @@ -127836,7 +128684,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( ** ); */ - pTab = pSrc->a[0].pTab; + pTab = pSrc->a[0].pSTab; if( HasRowid(pTab) ){ pLhs = sqlite3PExpr(pParse, TK_ROW, 0, 0); pEList = sqlite3ExprListAppend( @@ -127869,9 +128717,9 @@ /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree ** and the SELECT subtree. */ - pSrc->a[0].pTab = 0; + pSrc->a[0].pSTab = 0; pSelectSrc = sqlite3SrcListDup(db, pSrc, 0); - pSrc->a[0].pTab = pTab; + pSrc->a[0].pSTab = pTab; if( pSrc->a[0].fg.isIndexedBy ){ assert( pSrc->a[0].fg.isCte==0 ); pSrc->a[0].u2.pIBIndex = 0; @@ -129003,7 +129851,6 @@ static void substrFunc( int len; int p0type; i64 p1, p2; - int negP2 = 0; assert( argc==3 || argc==2 ); if( sqlite3_value_type(argv[1])==SQLITE_NULL || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL) ){ return; } p0type = sqlite3_value_type(argv[0]); - p1 = sqlite3_value_int(argv[1]); + p1 = sqlite3_value_int64(argv[1]); if( p0type==SQLITE_BLOB ){ len = sqlite3_value_bytes(argv[0]); z = sqlite3_value_blob(argv[0]); @@ -129037,19 +129884,18 @@ if( p1==0 ) p1 = 1; /* <rdar://problem/6778339> */ #endif if( argc==3 ){ - p2 = sqlite3_value_int(argv[2]); - if( p2<0 ){ - p2 = -p2; - negP2 = 1; - } + p2 = sqlite3_value_int64(argv[2]); }else{ p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; } if( p1<0 ){ p1 += len; if( p1<0 ){ - p2 += p1; - if( p2<0 ) p2 = 0; + if( p2<0 ){ + p2 = 0; + }else{ + p2 += p1; + } p1 = 0; } }else if( p1>0 ){ @@ -129057,12 +129903,13 @@ }else if( p2>0 ){ p2--; } - if( negP2 ){ - p1 -= p2; - if( p1<0 ){ - p2 += p1; - p1 = 0; + if( p2<0 ){ + if( p2<-p1 ){ + p2 = p1; + }else{ + p2 = -p2; } + p1 -= p2; } assert( p1>=0 && p2>=0 ); if( p0type!=SQLITE_BLOB ){ sqlite3_result_text64(context, (char*)z, z2-z, SQLITE_TRANSIENT, SQLITE_UTF8); }else{ - if( p1+p2>len ){ + if( p1>=len ){ + p1 = p2 = 0; + }else if( p2>len-p1 ){ p2 = len-p1; - if( p2<0 ) p2 = 0; + assert( p2>0 ); } sqlite3_result_blob64(context, (char*)&z[p1], (u64)p2, SQLITE_TRANSIENT); } @@ -129089,13 +129938,13 @@ */ #ifndef SQLITE_OMIT_FLOATING_POINT static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - int n = 0; + i64 n = 0; double r; char *zBuf; assert( argc==1 || argc==2 ); if( argc==2 ){ if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; - n = sqlite3_value_int(argv[1]); + n = sqlite3_value_int64(argv[1]); if( n>30 ) n = 30; if( n<0 ) n = 0; } @@ -129110,7 +129959,7 @@ }else if( n==0 ){ r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5))); }else{ - zBuf = sqlite3_mprintf("%!.*f",n,r); + zBuf = sqlite3_mprintf("%!.*f",(int)n,r); if( zBuf==0 ){ sqlite3_result_error_nomem(context); return; } @@ -130219,7 +131068,7 @@ static void
concatFuncCore( for(i=0; icnt>0 ); p->cnt--; if( !p->approx ){ - p->iSum -= sqlite3_value_int64(argv[0]); + if( sqlite3SubInt64(&p->iSum, sqlite3_value_int64(argv[0])) ){ + p->ovrfl = 1; + p->approx = 1; + } }else if( type==SQLITE_INTEGER ){ i64 iVal = sqlite3_value_int64(argv[0]); if( iVal!=SMALLEST_INT64 ){ @@ -130698,7 +131550,11 @@ static void minMaxFinalize(sqlite3_context *context){ ** group_concat(EXPR, ?SEPARATOR?) ** string_agg(EXPR, SEPARATOR) ** -** The SEPARATOR goes before the EXPR string. This is tragic. The +** Content is accumulated in GroupConcatCtx.str with the SEPARATOR +** coming before the EXPR value, except for the first entry which +** omits the SEPARATOR. +** +** It is tragic that the SEPARATOR goes before the EXPR string. The ** groupConcatInverse() implementation would have been easier if the ** SEPARATOR were appended after EXPR. And the order is undocumented, ** so we could change it, in theory. But the old behavior has been @@ -130802,7 +131658,7 @@ static void groupConcatInverse( /* pGCC is always non-NULL since groupConcatStep() will have always ** run first to initialize it */ if( ALWAYS(pGCC) ){ - int nVS; + int nVS; /* Number of characters to remove */ /* Must call sqlite3_value_text() to convert the argument into text prior ** to invoking sqlite3_value_bytes(), in case the text encoding is UTF16 */ (void)sqlite3_value_text(argv[0]); @@ -131180,7 +132036,13 @@ static void signFunc( ** Implementation of fpdecode(x,y,z) function. ** ** x is a real number that is to be decoded. y is the precision. -** z is the maximum real precision. +** z is the maximum real precision. Return a string that shows the +** results of the sqlite3FpDecode() function. +** +** Used for testing and debugging only, specifically testing and debugging +** of the sqlite3FpDecode() function. This SQL function does not appear +** in production builds. This function is not an API and is subject to +** modification or removal in future versions of SQLite. */ static void fpdecodeFunc( sqlite3_context *context, @@ -131196,6 +132058,7 @@ static void fpdecodeFunc( x = sqlite3_value_double(argv[0]); y = sqlite3_value_int(argv[1]); z = sqlite3_value_int(argv[2]); + if( z<=0 ) z = 1; sqlite3FpDecode(&s, x, y, z); if( s.isSpecial==2 ){ sqlite3_snprintf(sizeof(zBuf), zBuf, "NaN"); @@ -131206,6 +132069,82 @@ static void fpdecodeFunc( } #endif /* SQLITE_DEBUG */ +#ifdef SQLITE_DEBUG +/* +** Implementation of parseuri(uri,flags) function. +** +** Required Arguments: +** "uri" The URI to parse. +** "flags" Bitmask of flags, as if to sqlite3_open_v2(). +** +** Additional arguments beyond the first two make calls to +** sqlite3_uri_key() for integers and sqlite3_uri_parameter for +** anything else. +** +** The result is a string showing the results of calling sqlite3ParseUri(). +** +** Used for testing and debugging only, specifically testing and debugging +** of the sqlite3ParseUri() function. This SQL function does not appear +** in production builds. This function is not an API and is subject to +** modification or removal in future versions of SQLite. 
+*/ +static void parseuriFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + sqlite3_str *pResult; + const char *zVfs; + const char *zUri; + unsigned int flgs; + int rc; + sqlite3_vfs *pVfs = 0; + char *zFile = 0; + char *zErr = 0; + + if( argc<2 ) return; + pVfs = sqlite3_vfs_find(0); + assert( pVfs ); + zVfs = pVfs->zName; + zUri = (const char*)sqlite3_value_text(argv[0]); + if( zUri==0 ) return; + flgs = (unsigned int)sqlite3_value_int(argv[1]); + rc = sqlite3ParseUri(zVfs, zUri, &flgs, &pVfs, &zFile, &zErr); + pResult = sqlite3_str_new(0); + if( pResult ){ + int i; + sqlite3_str_appendf(pResult, "rc=%d", rc); + sqlite3_str_appendf(pResult, ", flags=0x%x", flgs); + sqlite3_str_appendf(pResult, ", vfs=%Q", pVfs ? pVfs->zName: 0); + sqlite3_str_appendf(pResult, ", err=%Q", zErr); + sqlite3_str_appendf(pResult, ", file=%Q", zFile); + if( zFile ){ + const char *z = zFile; + z += sqlite3Strlen30(z)+1; + while( z[0] ){ + sqlite3_str_appendf(pResult, ", %Q", z); + z += sqlite3Strlen30(z)+1; + } + for(i=2; ia; - pItem->pTab = pFKey->pFrom; + pItem->pSTab = pFKey->pFrom; pItem->zName = pFKey->pFrom->zName; - pItem->pTab->nTabRef++; + pItem->pSTab->nTabRef++; pItem->iCursor = pParse->nTab++; if( regNew!=0 ){ @@ -132737,7 +133669,8 @@ static Trigger *fkActionTrigger( SrcList *pSrc; Expr *pRaise; - pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); + pRaise = sqlite3Expr(db, TK_STRING, "FOREIGN KEY constraint failed"), + pRaise = sqlite3PExpr(pParse, TK_RAISE, pRaise, 0); if( pRaise ){ pRaise->affExpr = OE_Abort; } @@ -132745,7 +133678,8 @@ static Trigger *fkActionTrigger( if( pSrc ){ assert( pSrc->nSrc==1 ); pSrc->a[0].zName = sqlite3DbStrDup(db, zFrom); - pSrc->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + assert( pSrc->a[0].fg.fixedSchema==0 && pSrc->a[0].fg.isSubquery==0 ); + pSrc->a[0].u4.zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); } pSelect = sqlite3SelectNew(pParse, sqlite3ExprListAppend(pParse, 0, pRaise), @@ -133479,8 +134413,11 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal){ if( ALWAYS(pVal) && pVal->pSrc->nSrc>0 ){ SrcItem *pItem = &pVal->pSrc->a[0]; - sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->regReturn); - sqlite3VdbeJumpHere(pParse->pVdbe, pItem->addrFillSub - 1); + assert( (pItem->fg.isSubquery && pItem->u4.pSubq!=0) || pParse->nErr ); + if( pItem->fg.isSubquery ){ + sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->u4.pSubq->regReturn); + sqlite3VdbeJumpHere(pParse->pVdbe, pItem->u4.pSubq->addrFillSub - 1); + } } } @@ -133608,6 +134545,7 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList if( pRet ){ SelectDest dest; + Subquery *pSubq; pRet->pSrc->nSrc = 1; pRet->pPrior = pLeft->pPrior; pRet->op = pLeft->op; @@ -133617,28 +134555,32 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList assert( pLeft->pNext==0 ); assert( pRet->pNext==0 ); p = &pRet->pSrc->a[0]; - p->pSelect = pLeft; p->fg.viaCoroutine = 1; - p->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; - p->regReturn = ++pParse->nMem; p->iCursor = -1; + assert( !p->fg.isIndexedBy && !p->fg.isTabFunc ); p->u1.nRow = 2; - sqlite3VdbeAddOp3(v,OP_InitCoroutine,p->regReturn,0,p->addrFillSub); - sqlite3SelectDestInit(&dest, SRT_Coroutine, p->regReturn); - - /* Allocate registers for the output of the co-routine. Do so so - ** that there are two unused registers immediately before those - ** used by the co-routine. 
This allows the code in sqlite3Insert() - ** to use these registers directly, instead of copying the output - ** of the co-routine to a separate array for processing. */ - dest.iSdst = pParse->nMem + 3; - dest.nSdst = pLeft->pEList->nExpr; - pParse->nMem += 2 + dest.nSdst; - - pLeft->selFlags |= SF_MultiValue; - sqlite3Select(pParse, pLeft, &dest); - p->regResult = dest.iSdst; - assert( pParse->nErr || dest.iSdst>0 ); + if( sqlite3SrcItemAttachSubquery(pParse, p, pLeft, 0) ){ + pSubq = p->u4.pSubq; + pSubq->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; + pSubq->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, + pSubq->regReturn, 0, pSubq->addrFillSub); + sqlite3SelectDestInit(&dest, SRT_Coroutine, pSubq->regReturn); + + /* Allocate registers for the output of the co-routine. Do so so + ** that there are two unused registers immediately before those + ** used by the co-routine. This allows the code in sqlite3Insert() + ** to use these registers directly, instead of copying the output + ** of the co-routine to a separate array for processing. */ + dest.iSdst = pParse->nMem + 3; + dest.nSdst = pLeft->pEList->nExpr; + pParse->nMem += 2 + dest.nSdst; + + pLeft->selFlags |= SF_MultiValue; + sqlite3Select(pParse, pLeft, &dest); + pSubq->regResult = dest.iSdst; + assert( pParse->nErr || dest.iSdst>0 ); + } pLeft = pRet; } }else{ @@ -133648,12 +134590,18 @@ } if( pParse->nErr==0 ){ + Subquery *pSubq; assert( p!=0 ); - if( p->pSelect->pEList->nExpr!=pRow->nExpr ){ - sqlite3SelectWrongNumTermsError(pParse, p->pSelect); + assert( p->fg.isSubquery ); + pSubq = p->u4.pSubq; + assert( pSubq!=0 ); + assert( pSubq->pSelect!=0 ); + assert( pSubq->pSelect->pEList!=0 ); + if( pSubq->pSelect->pEList->nExpr!=pRow->nExpr ){ + sqlite3SelectWrongNumTermsError(pParse, pSubq->pSelect); }else{ - sqlite3ExprCodeExprList(pParse, pRow, p->regResult, 0, 0); - sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, p->regReturn); + sqlite3ExprCodeExprList(pParse, pRow, pSubq->regResult, 0, 0); + sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, pSubq->regReturn); } } sqlite3ExprListDelete(pParse->db, pRow); @@ -133807,6 +134755,7 @@ SQLITE_PRIVATE void sqlite3Insert( int regRowid; /* registers holding insert rowid */ int regData; /* register holding first column to insert */ int *aRegIdx = 0; /* One register allocated to each index */ + int *aTabColMap = 0; /* Mapping from pTab columns to pCol entries */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to insert into a view */ @@ -133951,15 +134900,15 @@ */ bIdListInOrder = (pTab->tabFlags & (TF_OOOHidden|TF_HasStored))==0; if( pColumn ){ - assert( pColumn->eU4!=EU4_EXPR ); - pColumn->eU4 = EU4_IDX; - for(i=0; i<pColumn->nId; i++){ - pColumn->a[i].u4.idx = -1; - } + aTabColMap = sqlite3DbMallocZero(db, pTab->nCol*sizeof(int)); + if( aTabColMap==0 ) goto insert_cleanup; for(i=0; i<pColumn->nId; i++){ + const char *zCName = pColumn->a[i].zName; + u8 hName = sqlite3StrIHash(zCName); for(j=0; j<pTab->nCol; j++){ - if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zCnName)==0 ){ - pColumn->a[i].u4.idx = j; + if( pTab->aCol[j].hName!=hName ) continue; + if( sqlite3StrICmp(zCName, pTab->aCol[j].zCnName)==0 ){ + if( aTabColMap[j]==0 ) aTabColMap[j] = i+1; if( i!=j ) bIdListInOrder = 0; if( j==pTab->iPKey ){ ipkColumn = i; assert( !withoutRowid ); @@ -134004,9 +134953,14 @@ && pSelect->pPrior==0 ){ SrcItem *pItem = &pSelect->pSrc->a[0]; -
dest.iSDParm = pItem->regReturn; - regFromSelect = pItem->regResult; - nColumn = pItem->pSelect->pEList->nExpr; + Subquery *pSubq; + assert( pItem->fg.isSubquery ); + pSubq = pItem->u4.pSubq; + dest.iSDParm = pSubq->regReturn; + regFromSelect = pSubq->regResult; + assert( pSubq->pSelect!=0 ); + assert( pSubq->pSelect->pEList!=0 ); + nColumn = pSubq->pSelect->pEList->nExpr; ExplainQueryPlan((pParse, 0, "SCAN %S", pItem)); if( bIdListInOrder && nColumn==pTab->nCol ){ regData = regFromSelect; @@ -134276,9 +135230,9 @@ SQLITE_PRIVATE void sqlite3Insert( } } if( pColumn ){ - assert( pColumn->eU4==EU4_IDX ); - for(j=0; jnId && pColumn->a[j].u4.idx!=i; j++){} - if( j>=pColumn->nId ){ + j = aTabColMap[i]; + assert( j>=0 && j<=pColumn->nId ); + if( j==0 ){ /* A column not named in the insert column list gets its ** default value */ sqlite3ExprCodeFactorable(pParse, @@ -134286,7 +135240,7 @@ SQLITE_PRIVATE void sqlite3Insert( iRegStore); continue; } - k = j; + k = j - 1; }else if( nColumn==0 ){ /* This is INSERT INTO ... DEFAULT VALUES. Load the default value. */ sqlite3ExprCodeFactorable(pParse, @@ -134531,7 +135485,10 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3ExprListDelete(db, pList); sqlite3UpsertDelete(db, pUpsert); sqlite3SelectDelete(db, pSelect); - sqlite3IdListDelete(db, pColumn); + if( pColumn ){ + sqlite3IdListDelete(db, pColumn); + sqlite3DbFree(db, aTabColMap); + } if( aRegIdx ) sqlite3DbNNFreeNN(db, aRegIdx); } @@ -135926,7 +136883,7 @@ static int xferOptimization( if( pSelect->pSrc->nSrc!=1 ){ return 0; /* FROM clause must have exactly one term */ } - if( pSelect->pSrc->a[0].pSelect ){ + if( pSelect->pSrc->a[0].fg.isSubquery ){ return 0; /* FROM clause cannot contain a subquery */ } if( pSelect->pWhere ){ @@ -139869,12 +140826,6 @@ SQLITE_PRIVATE void sqlite3Pragma( ** in auto-commit mode. 
*/ mask &= ~(SQLITE_ForeignKeys); } -#if SQLITE_USER_AUTHENTICATION - if( db->auth.authLevel==UAUTH_User ){ - /* Do not allow non-admin users to modify the schema arbitrarily */ - mask &= ~(SQLITE_WriteSchema); - } -#endif if( sqlite3GetBoolean(zRight, 0) ){ if( (mask & SQLITE_WriteSchema)==0 @@ -140010,7 +140961,8 @@ SQLITE_PRIVATE void sqlite3Pragma( char *zSql = sqlite3MPrintf(db, "SELECT*FROM\"%w\"", pTab->zName); if( zSql ){ sqlite3_stmt *pDummy = 0; - (void)sqlite3_prepare(db, zSql, -1, &pDummy, 0); + (void)sqlite3_prepare_v3(db, zSql, -1, SQLITE_PREPARE_DONT_LOG, + &pDummy, 0); (void)sqlite3_finalize(pDummy); sqlite3DbFree(db, zSql); } @@ -140486,11 +141438,12 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Make sure sufficient number of registers have been allocated */ sqlite3TouchRegister(pParse, 8+cnt); + sqlite3VdbeAddOp3(v, OP_Null, 0, 8, 8+cnt); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ sqlite3VdbeAddOp4(v, OP_IntegrityCk, 1, cnt, 8, (char*)aRoot,P4_INTARRAY); - sqlite3VdbeChangeP5(v, (u8)i); + sqlite3VdbeChangeP5(v, (u16)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zDbSName), @@ -142110,14 +143063,7 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl #else encoding = SQLITE_UTF8; #endif - if( db->nVdbeActive>0 && encoding!=ENC(db) - && (db->mDbFlags & DBFLAG_Vacuum)==0 - ){ - rc = SQLITE_LOCKED; - goto initone_error_out; - }else{ - sqlite3SetTextEncoding(db, encoding); - } + sqlite3SetTextEncoding(db, encoding); }else{ /* If opening an attached database, the encoding much match ENC(db) */ if( (meta[BTREE_TEXT_ENCODING-1] & 3)!=ENC(db) ){ @@ -142811,12 +143757,24 @@ static int sqlite3Prepare16( if( !sqlite3SafetyCheckOk(db)||zSql==0 ){ return SQLITE_MISUSE_BKPT; } + + /* Make sure nBytes is non-negative and correct. It should be the + ** number of bytes until the end of the input buffer or until the first + ** U+0000 character. If the input nBytes is odd, convert it into + ** an even number. If the input nBytes is negative, then the input + ** must be terminated by at least one U+0000 character */ if( nBytes>=0 ){ int sz; const char *z = (const char*)zSql; for(sz=0; szmutex); zSql8 = sqlite3Utf16to8(db, zSql, nBytes, SQLITE_UTF16NATIVE); if( zSql8 ){ @@ -142830,7 +143788,7 @@ static int sqlite3Prepare16( ** the same number of characters into the UTF-16 string. 
*/ int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8)); - *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed); + *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, nBytes, chars_parsed); } sqlite3DbFree(db, zSql8); rc = sqlite3ApiExit(db, rc); @@ -143224,11 +144182,13 @@ SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){ */ SQLITE_PRIVATE void sqlite3SrcItemColumnUsed(SrcItem *pItem, int iCol){ assert( pItem!=0 ); - assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) ); + assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem) ); if( pItem->fg.isNestedFrom ){ ExprList *pResults; - assert( pItem->pSelect!=0 ); - pResults = pItem->pSelect->pEList; + assert( pItem->fg.isSubquery ); + assert( pItem->u4.pSubq!=0 ); + assert( pItem->u4.pSubq->pSelect!=0 ); + pResults = pItem->u4.pSubq->pSelect->pEList; assert( pResults!=0 ); assert( iCol>=0 && iColnExpr ); pResults->a[iCol].fg.bUsed = 1; @@ -143262,9 +144222,9 @@ static int tableAndColumnIndex( assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */ for(i=iStart; i<=iEnd; i++){ - iCol = sqlite3ColumnIndex(pSrc->a[i].pTab, zCol); + iCol = sqlite3ColumnIndex(pSrc->a[i].pSTab, zCol); if( iCol>=0 - && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pTab->aCol[iCol])==0) + && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pSTab->aCol[iCol])==0) ){ if( piTab ){ sqlite3SrcItemColumnUsed(&pSrc->a[i], iCol); @@ -143393,10 +144353,10 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ pLeft = &pSrc->a[0]; pRight = &pLeft[1]; for(i=0; inSrc-1; i++, pRight++, pLeft++){ - Table *pRightTab = pRight->pTab; + Table *pRightTab = pRight->pSTab; u32 joinType; - if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue; + if( NEVER(pLeft->pSTab==0 || pRightTab==0) ) continue; joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? 
EP_OuterON : EP_InnerON; /* If this is a NATURAL join, synthesize an appropriate USING clause @@ -144269,12 +145229,18 @@ static void selectInnerLoop( ** case the order does matter */ pushOntoSorter( pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); + pDest->iSDParm2 = 0; /* Signal that any Bloom filter is unpopulated */ }else{ int r1 = sqlite3GetTempReg(pParse); assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol, r1, pDest->zAffSdst, nResultCol); sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol); + if( pDest->iSDParm2 ){ + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pDest->iSDParm2, 0, + regResult, nResultCol); + ExplainQueryPlan((pParse, 0, "CREATE BLOOM FILTER")); + } sqlite3ReleaseTempReg(pParse, r1); } break; @@ -144816,8 +145782,12 @@ static const char *columnTypeImpl( SrcList *pTabList = pNC->pSrcList; for(j=0;jnSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); if( jnSrc ){ - pTab = pTabList->a[j].pTab; - pS = pTabList->a[j].pSelect; + pTab = pTabList->a[j].pSTab; + if( pTabList->a[j].fg.isSubquery ){ + pS = pTabList->a[j].u4.pSubq->pSelect; + }else{ + pS = 0; + } }else{ pNC = pNC->pNext; } @@ -145384,7 +146354,7 @@ static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ p->iLimit = iLimit = ++pParse->nMem; v = sqlite3GetVdbe(pParse); assert( v!=0 ); - if( sqlite3ExprIsInteger(pLimit->pLeft, &n) ){ + if( sqlite3ExprIsInteger(pLimit->pLeft, &n, pParse) ){ sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit); VdbeComment((v, "LIMIT counter")); if( n==0 ){ @@ -145864,7 +146834,7 @@ static int multiSelect( p->pPrior = pPrior; p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); if( p->pLimit - && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit) + && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit, pParse) && nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit) ){ p->nSelectRow = sqlite3LogEst((u64)nLimit); @@ -146208,6 +147178,11 @@ static int generateOutputSubroutine( r1, pDest->zAffSdst, pIn->nSdst); sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1, pIn->iSdst, pIn->nSdst); + if( pDest->iSDParm2>0 ){ + sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pDest->iSDParm2, 0, + pIn->iSdst, pIn->nSdst); + ExplainQueryPlan((pParse, 0, "CREATE BLOOM FILTER")); + } sqlite3ReleaseTempReg(pParse, r1); break; } @@ -146786,32 +147761,32 @@ static Expr *substExpr( if( pSubst->isOuterJoin ){ ExprSetProperty(pNew, EP_CanBeNull); } - if( ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) ){ - sqlite3SetJoinExpr(pNew, pExpr->w.iJoin, - pExpr->flags & (EP_OuterON|EP_InnerON)); - } - sqlite3ExprDelete(db, pExpr); - pExpr = pNew; - if( pExpr->op==TK_TRUEFALSE ){ - pExpr->u.iValue = sqlite3ExprTruthValue(pExpr); - pExpr->op = TK_INTEGER; - ExprSetProperty(pExpr, EP_IntValue); + if( pNew->op==TK_TRUEFALSE ){ + pNew->u.iValue = sqlite3ExprTruthValue(pNew); + pNew->op = TK_INTEGER; + ExprSetProperty(pNew, EP_IntValue); } /* Ensure that the expression now has an implicit collation sequence, ** just as it did when it was a column of a view or sub-query. 
*/ { - CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pExpr); + CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pNew); CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pSubst->pCList->a[iColumn].pExpr ); - if( pNat!=pColl || (pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE) ){ - pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, + if( pNat!=pColl || (pNew->op!=TK_COLUMN && pNew->op!=TK_COLLATE) ){ + pNew = sqlite3ExprAddCollateString(pSubst->pParse, pNew, (pColl ? pColl->zName : "BINARY") ); } } - ExprClearProperty(pExpr, EP_Collate); + ExprClearProperty(pNew, EP_Collate); + if( ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) ){ + sqlite3SetJoinExpr(pNew, pExpr->w.iJoin, + pExpr->flags & (EP_OuterON|EP_InnerON)); + } + sqlite3ExprDelete(db, pExpr); + pExpr = pNew; } } }else{ @@ -146864,7 +147839,9 @@ static void substSelect( pSrc = p->pSrc; assert( pSrc!=0 ); for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - substSelect(pSubst, pItem->pSelect, 1); + if( pItem->fg.isSubquery ){ + substSelect(pSubst, pItem->u4.pSubq->pSelect, 1); + } if( pItem->fg.isTabFunc ){ substExprList(pSubst, pItem->u1.pFuncArg); } @@ -146895,7 +147872,7 @@ static void recomputeColumnsUsed( SrcItem *pSrcItem /* Which FROM clause item to recompute */ ){ Walker w; - if( NEVER(pSrcItem->pTab==0) ) return; + if( NEVER(pSrcItem->pSTab==0) ) return; memset(&w, 0, sizeof(w)); w.xExprCallback = recomputeColumnsUsedExpr; w.xSelectCallback = sqlite3SelectWalkNoop; @@ -146935,8 +147912,10 @@ static void srclistRenumberCursors( aCsrMap[pItem->iCursor+1] = pParse->nTab++; } pItem->iCursor = aCsrMap[pItem->iCursor+1]; - for(p=pItem->pSelect; p; p=p->pPrior){ - srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + if( pItem->fg.isSubquery ){ + for(p=pItem->u4.pSubq->pSelect; p; p=p->pPrior){ + srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + } } } } @@ -147247,7 +148226,8 @@ static int flattenSubquery( assert( pSrc && iFrom>=0 && iFromnSrc ); pSubitem = &pSrc->a[iFrom]; iParent = pSubitem->iCursor; - pSub = pSubitem->pSelect; + assert( pSubitem->fg.isSubquery ); + pSub = pSubitem->u4.pSubq->pSelect; assert( pSub!=0 ); #ifndef SQLITE_OMIT_WINDOWFUNC @@ -147300,7 +148280,7 @@ static int flattenSubquery( */ if( (pSubitem->fg.jointype & (JT_OUTER|JT_LTORJ))!=0 ){ if( pSubSrc->nSrc>1 /* (3a) */ - || IsVirtual(pSubSrc->a[0].pTab) /* (3b) */ + || IsVirtual(pSubSrc->a[0].pSTab) /* (3b) */ || (p->selFlags & SF_Distinct)!=0 /* (3d) */ || (pSubitem->fg.jointype & JT_RIGHT)!=0 /* (26) */ ){ @@ -147386,14 +148366,18 @@ static int flattenSubquery( pParse->zAuthContext = zSavedAuthContext; /* Delete the transient structures associated with the subquery */ - pSub1 = pSubitem->pSelect; - sqlite3DbFree(db, pSubitem->zDatabase); + + if( ALWAYS(pSubitem->fg.isSubquery) ){ + pSub1 = sqlite3SubqueryDetach(db, pSubitem); + }else{ + pSub1 = 0; + } + assert( pSubitem->fg.isSubquery==0 ); + assert( pSubitem->fg.fixedSchema==0 ); sqlite3DbFree(db, pSubitem->zName); sqlite3DbFree(db, pSubitem->zAlias); - pSubitem->zDatabase = 0; pSubitem->zName = 0; pSubitem->zAlias = 0; - pSubitem->pSelect = 0; assert( pSubitem->fg.isUsing!=0 || pSubitem->u3.pOn==0 ); /* If the sub-query is a compound SELECT statement, then (by restrictions @@ -147434,8 +148418,8 @@ static int flattenSubquery( ExprList *pOrderBy = p->pOrderBy; Expr *pLimit = p->pLimit; Select *pPrior = p->pPrior; - Table *pItemTab = pSubitem->pTab; - pSubitem->pTab = 0; + Table *pItemTab = pSubitem->pSTab; + pSubitem->pSTab = 0; p->pOrderBy = 0; p->pPrior = 0; p->pLimit = 0; @@ 
-147443,7 +148427,7 @@ static int flattenSubquery( p->pLimit = pLimit; p->pOrderBy = pOrderBy; p->op = TK_ALL; - pSubitem->pTab = pItemTab; + pSubitem->pSTab = pItemTab; if( pNew==0 ){ p->pPrior = pPrior; }else{ @@ -147458,11 +148442,14 @@ static int flattenSubquery( TREETRACE(0x4,pParse,p,("compound-subquery flattener" " creates %u as peer\n",pNew->selId)); } - assert( pSubitem->pSelect==0 ); + assert( pSubitem->fg.isSubquery==0 ); } sqlite3DbFree(db, aCsrMap); if( db->mallocFailed ){ - pSubitem->pSelect = pSub1; + assert( pSubitem->fg.fixedSchema==0 ); + assert( pSubitem->fg.isSubquery==0 ); + assert( pSubitem->u4.zDatabase==0 ); + sqlite3SrcItemAttachSubquery(pParse, pSubitem, pSub1, 0); return 1; } @@ -147473,8 +148460,8 @@ static int flattenSubquery( ** ** pSubitem->pTab is always non-NULL by test restrictions and tests above. */ - if( ALWAYS(pSubitem->pTab!=0) ){ - Table *pTabToDel = pSubitem->pTab; + if( ALWAYS(pSubitem->pSTab!=0) ){ + Table *pTabToDel = pSubitem->pSTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel); @@ -147482,7 +148469,7 @@ static int flattenSubquery( }else{ pTabToDel->nTabRef--; } - pSubitem->pTab = 0; + pSubitem->pSTab = 0; } /* The following loop runs once for each term in a compound-subquery @@ -147536,13 +148523,16 @@ static int flattenSubquery( /* Transfer the FROM clause terms from the subquery into the ** outer query. */ + iNewParent = pSubSrc->a[0].iCursor; for(i=0; ia[i+iFrom]; - if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing); assert( pItem->fg.isTabFunc==0 ); + assert( pItem->fg.isSubquery + || pItem->fg.fixedSchema + || pItem->u4.zDatabase==0 ); + if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing); *pItem = pSubSrc->a[i]; pItem->fg.jointype |= ltorj; - iNewParent = pSubSrc->a[i].iCursor; memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); } pSrc->a[iFrom].fg.jointype &= JT_LTORJ; @@ -147582,6 +148572,7 @@ static int flattenSubquery( pWhere = pSub->pWhere; pSub->pWhere = 0; if( isOuterJoin>0 ){ + assert( pSubSrc->nSrc==1 ); sqlite3SetJoinExpr(pWhere, iNewParent, EP_OuterON); } if( pWhere ){ @@ -147958,7 +148949,8 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** ** NAME AMBIGUITY ** -** This optimization is called the "WHERE-clause push-down optimization". +** This optimization is called the "WHERE-clause push-down optimization" +** or sometimes the "predicate push-down optimization". 
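+** As an illustrative example (hypothetical tables):
+**
+**     SELECT * FROM (SELECT a,b FROM t1 UNION ALL SELECT a,b FROM t2)
+**      WHERE a=5;
+**
+** The a=5 term can be duplicated into the WHERE clause of each arm of
+** the compound subquery, so that rows are filtered before they reach
+** the outer query.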
** ** Do not confuse this optimization with another unrelated optimization ** with a similar name: The "MySQL push-down optimization" causes WHERE @@ -148222,10 +149214,10 @@ static int disableUnusedSubqueryResultColumns(SrcItem *pItem){ if( pItem->fg.isCorrelated || pItem->fg.isCte ){ return 0; } - assert( pItem->pTab!=0 ); - pTab = pItem->pTab; - assert( pItem->pSelect!=0 ); - pSub = pItem->pSelect; + assert( pItem->pSTab!=0 ); + pTab = pItem->pSTab; + assert( pItem->fg.isSubquery ); + pSub = pItem->u4.pSubq->pSelect; assert( pSub->pEList->nExpr==pTab->nCol ); for(pX=pSub; pX; pX=pX->pPrior){ if( (pX->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){ @@ -148354,13 +149346,13 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ if( p->pWhere || p->pEList->nExpr!=1 || p->pSrc->nSrc!=1 - || p->pSrc->a[0].pSelect + || p->pSrc->a[0].fg.isSubquery || pAggInfo->nFunc!=1 || p->pHaving ){ return 0; } - pTab = p->pSrc->a[0].pTab; + pTab = p->pSrc->a[0].pSTab; assert( pTab!=0 ); assert( !IsView(pTab) ); if( !IsOrdinaryTable(pTab) ) return 0; @@ -148385,7 +149377,7 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ ** pFrom->pIndex and return SQLITE_OK. */ SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, SrcItem *pFrom){ - Table *pTab = pFrom->pTab; + Table *pTab = pFrom->pSTab; char *zIndexedBy = pFrom->u1.zIndexedBy; Index *pIdx; assert( pTab!=0 ); @@ -148462,7 +149454,11 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ if( pNew==0 ) return WRC_Abort; memset(&dummy, 0, sizeof(dummy)); pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0); - if( pNewSrc==0 ) return WRC_Abort; + assert( pNewSrc!=0 || pParse->nErr ); + if( pParse->nErr ){ + sqlite3SrcListDelete(db, pNewSrc); + return WRC_Abort; + } *pNew = *p; p->pSrc = pNewSrc; p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ASTERISK, 0)); @@ -148517,7 +149513,7 @@ static struct Cte *searchWith( ){ const char *zName = pItem->zName; With *p; - assert( pItem->zDatabase==0 ); + assert( pItem->fg.fixedSchema || pItem->u4.zDatabase==0 ); assert( zName!=0 ); for(p=pWith; p; p=p->pOuter){ int i; @@ -148587,7 +149583,7 @@ static int resolveFromTermToCte( Cte *pCte; /* Matched CTE (or NULL if no match) */ With *pWith; /* The matching WITH */ - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); if( pParse->pWith==0 ){ /* There are no WITH clauses in the stack. No match is possible */ return 0; @@ -148597,7 +149593,8 @@ static int resolveFromTermToCte( ** go no further. */ return 0; } - if( pFrom->zDatabase!=0 ){ + assert( pFrom->fg.hadSchema==0 || pFrom->fg.notCte!=0 ); + if( pFrom->fg.fixedSchema==0 && pFrom->u4.zDatabase!=0 ){ /* The FROM term contains a schema qualifier (ex: main.t1) and so ** it cannot possibly be a CTE reference. 
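** As an illustrative example:
**
**     WITH t1(x) AS (SELECT 1)
**     SELECT * FROM main.t1;
**
** resolves main.t1 to the persistent table of that name (if one exists),
** never to the CTE, because the schema qualifier forces an ordinary
** table lookup.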
*/ return 0; @@ -148633,7 +149630,7 @@ static int resolveFromTermToCte( } if( cannotBeFunction(pParse, pFrom) ) return 2; - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return 2; pCteUse = pCte->pUse; @@ -148647,26 +149644,29 @@ static int resolveFromTermToCte( } pCteUse->eM10d = pCte->eM10d; } - pFrom->pTab = pTab; + pFrom->pSTab = pTab; pTab->nTabRef = 1; pTab->zName = sqlite3DbStrDup(db, pCte->zName); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; - pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); + sqlite3SrcItemAttachSubquery(pParse, pFrom, pCte->pSelect, 1); if( db->mallocFailed ) return 2; - pFrom->pSelect->selFlags |= SF_CopyCte; - assert( pFrom->pSelect ); + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq ); + pSel = pFrom->u4.pSubq->pSelect; + assert( pSel!=0 ); + pSel->selFlags |= SF_CopyCte; if( pFrom->fg.isIndexedBy ){ sqlite3ErrorMsg(pParse, "no such index: \"%s\"", pFrom->u1.zIndexedBy); return 2; } + assert( !pFrom->fg.isIndexedBy ); pFrom->fg.isCte = 1; pFrom->u2.pCteUse = pCteUse; pCteUse->nUse++; /* Check if this is a recursive CTE. */ - pRecTerm = pSel = pFrom->pSelect; + pRecTerm = pSel; bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION ); while( bMayRecursive && pRecTerm->op==pSel->op ){ int i; @@ -148674,11 +149674,13 @@ static int resolveFromTermToCte( assert( pRecTerm->pPrior!=0 ); for(i=0; i<pSrc->nSrc; i++){ SrcItem *pItem = &pSrc->a[i]; - if( pItem->zDatabase==0 - && pItem->zName!=0 + if( pItem->zName!=0 + && !pItem->fg.hadSchema + && ALWAYS( !pItem->fg.isSubquery ) + && (pItem->fg.fixedSchema || pItem->u4.zDatabase==0) && 0==sqlite3StrICmp(pItem->zName, pCte->zName) ){ - pItem->pTab = pTab; + pItem->pSTab = pTab; pTab->nTabRef++; pItem->fg.isRecursive = 1; if( pRecTerm->selFlags & SF_Recursive ){ @@ -148780,11 +149782,14 @@ SQLITE_PRIVATE void sqlite3SelectPopWith(Walker *pWalker, Select *p){ ** SQLITE_NOMEM.
*/ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ - Select *pSel = pFrom->pSelect; + Select *pSel; Table *pTab; + assert( pFrom->fg.isSubquery ); + assert( pFrom->u4.pSubq!=0 ); + pSel = pFrom->u4.pSubq->pSelect; assert( pSel ); - pFrom->pTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table)); + pFrom->pSTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table)); if( pTab==0 ) return SQLITE_NOMEM; pTab->nTabRef = 1; if( pFrom->zAlias ){ @@ -148904,33 +149909,35 @@ static int selectExpander(Walker *pWalker, Select *p){ */ for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab; - assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 ); - if( pFrom->pTab ) continue; + assert( pFrom->fg.isRecursive==0 || pFrom->pSTab!=0 ); + if( pFrom->pSTab ) continue; assert( pFrom->fg.isRecursive==0 ); if( pFrom->zName==0 ){ #ifndef SQLITE_OMIT_SUBQUERY - Select *pSel = pFrom->pSelect; + Select *pSel; + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq!=0 ); + pSel = pFrom->u4.pSubq->pSelect; /* A sub-query in the FROM clause of a SELECT */ assert( pSel!=0 ); - assert( pFrom->pTab==0 ); + assert( pFrom->pSTab==0 ); if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort; #endif #ifndef SQLITE_OMIT_CTE }else if( (rc = resolveFromTermToCte(pParse, pWalker, pFrom))!=0 ){ if( rc>1 ) return WRC_Abort; - pTab = pFrom->pTab; + pTab = pFrom->pSTab; assert( pTab!=0 ); #endif }else{ /* An ordinary table or view name in the FROM clause */ - assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); + assert( pFrom->pSTab==0 ); + pFrom->pSTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); if( pTab==0 ) return WRC_Abort; if( pTab->nTabRef>=0xffff ){ sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535", pTab->zName); - pFrom->pTab = 0; + pFrom->pSTab = 0; return WRC_Abort; } pTab->nTabRef++; @@ -148942,7 +149949,7 @@ static int selectExpander(Walker *pWalker, Select *p){ i16 nCol; u8 eCodeOrig = pWalker->eCode; if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; - assert( pFrom->pSelect==0 ); + assert( pFrom->fg.isSubquery==0 ); if( IsView(pTab) ){ if( (db->flags & SQLITE_EnableView)==0 && pTab->pSchema!=db->aDb[1].pSchema @@ -148950,7 +149957,7 @@ static int selectExpander(Walker *pWalker, Select *p){ sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited", pTab->zName); } - pFrom->pSelect = sqlite3SelectDup(db, pTab->u.view.pSelect, 0); + sqlite3SrcItemAttachSubquery(pParse, pFrom, pTab->u.view.pSelect, 1); } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( ALWAYS(IsVirtual(pTab)) @@ -148966,7 +149973,9 @@ static int selectExpander(Walker *pWalker, Select *p){ nCol = pTab->nCol; pTab->nCol = -1; pWalker->eCode = 1; /* Turn on Select.selId renumbering */ - sqlite3WalkSelect(pWalker, pFrom->pSelect); + if( pFrom->fg.isSubquery ){ + sqlite3WalkSelect(pWalker, pFrom->u4.pSubq->pSelect); + } pWalker->eCode = eCodeOrig; pTab->nCol = nCol; } @@ -149053,7 +150062,7 @@ static int selectExpander(Walker *pWalker, Select *p){ } for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ int nAdd; /* Number of cols including rowid */ - Table *pTab = pFrom->pTab; /* Table for this data source */ + Table *pTab = pFrom->pSTab; /* Table for this data source */ ExprList *pNestedFrom; /* Result-set of a nested FROM clause */ char *zTabName; /* AS name for this data source */ const char *zSchemaName = 0; /* Schema name for this data source */ @@ -149064,10 +150073,11 @@ static int 
selectExpander(Walker *pWalker, Select *p){ zTabName = pTab->zName; } if( db->mallocFailed ) break; - assert( (int)pFrom->fg.isNestedFrom == IsNestedFrom(pFrom->pSelect) ); + assert( (int)pFrom->fg.isNestedFrom == IsNestedFrom(pFrom) ); if( pFrom->fg.isNestedFrom ){ - assert( pFrom->pSelect!=0 ); - pNestedFrom = pFrom->pSelect->pEList; + assert( pFrom->fg.isSubquery && pFrom->u4.pSubq ); + assert( pFrom->u4.pSubq->pSelect!=0 ); + pNestedFrom = pFrom->u4.pSubq->pSelect->pEList; assert( pNestedFrom!=0 ); assert( pNestedFrom->nExpr==pTab->nCol ); assert( VisibleRowid(pTab)==0 || ViewCanHaveRowid ); @@ -149306,14 +150316,12 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ assert( (p->selFlags & SF_Resolved) ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab = pFrom->pTab; + Table *pTab = pFrom->pSTab; assert( pTab!=0 ); - if( (pTab->tabFlags & TF_Ephemeral)!=0 ){ + if( (pTab->tabFlags & TF_Ephemeral)!=0 && pFrom->fg.isSubquery ){ /* A sub-query in the FROM clause of a SELECT */ - Select *pSel = pFrom->pSelect; - if( pSel ){ - sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); - } + Select *pSel = pFrom->u4.pSubq->pSelect; + sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); } } } @@ -149627,6 +150635,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); + if( pParse->nErr ) return; pList = pF->pFExpr->x.pList; if( pF->iOBTab>=0 ){ /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs @@ -149667,7 +150676,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, iTop); sqlite3ReleaseTempRange(pParse, regAgg, nArg); @@ -149830,12 +150839,13 @@ static void updateAccumulator( } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); sqlite3ReleaseTempRange(pParse, regAgg, nArg); } if( addrNext ){ sqlite3VdbeResolveLabel(v, addrNext); } + if( pParse->nErr ) return; } if( regHit==0 && pAggInfo->nAccumulator ){ regHit = regAcc; @@ -149845,6 +150855,7 @@ static void updateAccumulator( } for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ sqlite3ExprCode(pParse, pC->pCExpr, AggInfoColumnReg(pAggInfo,i)); + if( pParse->nErr ) return; } pAggInfo->directMode = 0; @@ -149960,25 +150971,28 @@ static SrcItem *isSelfJoinView( int iFirst, int iEnd /* Range of FROM-clause entries to search. 
*/ ){ SrcItem *pItem; - assert( pThis->pSelect!=0 ); - if( pThis->pSelect->selFlags & SF_PushDown ) return 0; + Select *pSel; + assert( pThis->fg.isSubquery ); + pSel = pThis->u4.pSubq->pSelect; + assert( pSel!=0 ); + if( pSel->selFlags & SF_PushDown ) return 0; while( iFirst<iEnd ){ Select *pS1; pItem = &pTabList->a[iFirst++]; - if( pItem->pSelect==0 ) continue; + if( !pItem->fg.isSubquery ) continue; if( pItem->fg.viaCoroutine ) continue; if( pItem->zName==0 ) continue; - assert( pItem->pTab!=0 ); - assert( pThis->pTab!=0 ); - if( pItem->pTab->pSchema!=pThis->pTab->pSchema ) continue; + assert( pItem->pSTab!=0 ); + assert( pThis->pSTab!=0 ); + if( pItem->pSTab->pSchema!=pThis->pSTab->pSchema ) continue; if( sqlite3_stricmp(pItem->zName, pThis->zName)!=0 ) continue; - pS1 = pItem->pSelect; - if( pItem->pTab->pSchema==0 && pThis->pSelect->selId!=pS1->selId ){ + pS1 = pItem->u4.pSubq->pSelect; + if( pItem->pSTab->pSchema==0 && pSel->selId!=pS1->selId ){ /* The query flattener left two different CTE tables with identical ** names in the same FROM clause. */ continue; } - if( pItem->pSelect->selFlags & SF_PushDown ){ + if( pS1->selFlags & SF_PushDown ){ /* The view was modified by some other optimization such as ** pushDownWhereTerms() */ continue; @@ -150022,6 +151036,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ Expr *pExpr; Expr *pCount; sqlite3 *db; + SrcItem *pFrom; if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */ if( p->pEList->nExpr!=1 ) return 0; /* Single result column */ if( p->pWhere ) return 0; @@ -150036,8 +151051,9 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */ if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */ if( ExprHasProperty(pExpr, EP_WinFunc) ) return 0;/* Not a window function */ - pSub = p->pSrc->a[0].pSelect; - if( pSub==0 ) return 0; /* The FROM is a subquery */ + pFrom = p->pSrc->a; + if( pFrom->fg.isSubquery==0 ) return 0; /* FROM is a subquery */ + pSub = pFrom->u4.pSubq->pSelect; if( pSub->pPrior==0 ) return 0; /* Must be a compound */ if( pSub->selFlags & SF_CopyCte ) return 0; /* Not a CTE */ do{ @@ -150046,7 +151062,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( pSub->pLimit ) return 0; /* No LIMIT clause */ if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */ assert( pSub->pHaving==0 ); /* Due to the previous */ - pSub = pSub->pPrior; /* Repeat over compound */ + pSub = pSub->pPrior; /* Repeat over compound */ }while( pSub ); /* If we reach this point then it is OK to perform the transformation */ @@ -150054,8 +151070,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ db = pParse->db; pCount = pExpr; pExpr = 0; - pSub = p->pSrc->a[0].pSelect; - p->pSrc->a[0].pSelect = 0; + pSub = sqlite3SubqueryDetach(db, pFrom); sqlite3SrcListDelete(db, p->pSrc); p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*p->pSrc)); while( pSub ){ @@ -150100,12 +151115,12 @@ static int sameSrcAlias(SrcItem *p0, SrcList *pSrc){ for(i=0; i<pSrc->nSrc; i++){ SrcItem *p1 = &pSrc->a[i]; if( p1==p0 ) continue; - if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ + if( p0->pSTab==p1->pSTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ return 1; } - if( p1->pSelect - && (p1->pSelect->selFlags & SF_NestedFrom)!=0 - && sameSrcAlias(p0, p1->pSelect->pSrc) + if( p1->fg.isSubquery + && (p1->u4.pSubq->pSelect->selFlags & SF_NestedFrom)!=0 + && sameSrcAlias(p0, p1->u4.pSubq->pSelect->pSrc) ){ return 1; } @@ -150170,13 +151185,13 @@ static int
fromClauseTermCanBeCoroutine( if( i==0 ) break; i--; pItem--; - if( pItem->pSelect!=0 ) return 0; /* (1c-i) */ + if( pItem->fg.isSubquery ) return 0; /* (1c-i) */ } return 1; } /* -** Generate code for the SELECT statement given in the p argument. +** Generate byte-code for the SELECT statement given in the p argument. ** ** The results are returned according to the SelectDest structure. ** See comments in sqliteInt.h for further information. @@ -150187,6 +151202,40 @@ static int fromClauseTermCanBeCoroutine( ** ** This routine does NOT free the Select structure passed in. The ** calling function needs to do that. +** +** This is a long function. The following is an outline of the processing +** steps, with tags referencing various milestones: +** +** * Resolve names and similar preparation tag-select-0100 +** * Scan of the FROM clause tag-select-0200 +** + OUTER JOIN strength reduction tag-select-0220 +** + Sub-query ORDER BY removal tag-select-0230 +** + Query flattening tag-select-0240 +** * Separate subroutine for compound-SELECT tag-select-0300 +** * WHERE-clause constant propagation tag-select-0330 +** * Count()-of-VIEW optimization tag-select-0350 +** * Scan of the FROM clause again tag-select-0400 +** + Authorize unreferenced tables tag-select-0410 +** + Predicate push-down optimization tag-select-0420 +** + Omit unused subquery columns optimization tag-select-0440 +** + Generate code to implement subqueries tag-select-0480 +** - Co-routines tag-select-0482 +** - Reuse previously computed CTE tag-select-0484 +** - Reuse previously computed VIEW tag-select-0486 +** - Materialize a VIEW or CTE tag-select-0488 +** * DISTINCT ORDER BY -> GROUP BY optimization tag-select-0500 +** * Set up for ORDER BY tag-select-0600 +** * Create output table tag-select-0630 +** * Prepare registers for LIMIT tag-select-0650 +** * Setup for DISTINCT tag-select-0680 +** * Generate code for non-aggregate and non-GROUP BY tag-select-0700 +** * Generate code for aggregate and/or GROUP BY tag-select-0800 +** + GROUP BY queries tag-select-0810 +** + non-GROUP BY queries tag-select-0820 +** - Special case of count() w/o GROUP BY tag-select-0821 +** - General case of non-GROUP BY aggregates tag-select-0822 +** * Sort results, as needed tag-select-0900 +** * Internal self-checks tag-select-1000 */ SQLITE_PRIVATE int sqlite3Select( Parse *pParse, /* The parser context */ @@ -150230,6 +151279,7 @@ SQLITE_PRIVATE int sqlite3Select( } #endif + /* tag-select-0100 */ assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue ); @@ -150281,7 +151331,7 @@ SQLITE_PRIVATE int sqlite3Select( if( sameSrcAlias(p0, p->pSrc) ){ sqlite3ErrorMsg(pParse, "target object/alias may not appear in FROM clause: %s", - p0->zAlias ? p0->zAlias : p0->pTab->zName + p0->zAlias ? p0->zAlias : p0->pSTab->zName ); goto select_end; } @@ -150316,12 +151366,13 @@ SQLITE_PRIVATE int sqlite3Select( /* Try to do various optimizations (flattening subqueries, and strength ** reduction of join operators) in the FROM clause up into the main query + ** tag-select-0200 */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; !p->pPrior && i<pTabList->nSrc; i++){ SrcItem *pItem = &pTabList->a[i]; - Select *pSub = pItem->pSelect; - Table *pTab = pItem->pTab; + Select *pSub = pItem->fg.isSubquery ?
pItem->u4.pSubq->pSelect : 0; + Table *pTab = pItem->pSTab; /* The expander should have already created transient Table objects ** even for FROM clause elements such as subqueries that do not correspond @@ -150338,6 +151389,7 @@ SQLITE_PRIVATE int sqlite3Select( ** way that the i-th table cannot be the NULL row of a join, then ** perform the appropriate simplification. This is called ** "OUTER JOIN strength reduction" in the SQLite documentation. + ** tag-select-0220 */ if( (pItem->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor, @@ -150408,7 +151460,8 @@ SQLITE_PRIVATE int sqlite3Select( if( (pSub->selFlags & SF_Aggregate)!=0 ) continue; assert( pSub->pGroupBy==0 ); - /* If a FROM-clause subquery has an ORDER BY clause that is not + /* tag-select-0230: + ** If a FROM-clause subquery has an ORDER BY clause that is not ** really doing anything, then delete it now so that it does not ** interfere with query flattening. See the discussion at ** https://sqlite.org/forum/forumpost/2d76f2bcf65d256a @@ -150427,13 +151480,16 @@ SQLITE_PRIVATE int sqlite3Select( ** (a) The outer query has a different ORDER BY clause ** (b) The subquery is part of a join ** See forum post 062d576715d277c8 + ** (6) The subquery is not a recursive CTE. ORDER BY has a different + ** meaning for recursive CTEs and this optimization does not + ** apply. ** ** Also retain the ORDER BY if the OmitOrderBy optimization is disabled. */ if( pSub->pOrderBy!=0 && (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */ && pSub->pLimit==0 /* Condition (1) */ - && (pSub->selFlags & SF_OrderByReqd)==0 /* Condition (2) */ + && (pSub->selFlags & (SF_OrderByReqd|SF_Recursive))==0 /* (2) and (6) */ && (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */ && OptimizationEnabled(db, SQLITE_OmitOrderBy) ){ @@ -150471,6 +151527,7 @@ SQLITE_PRIVATE int sqlite3Select( continue; } + /* tag-select-0240 */ if( flattenSubquery(pParse, p, i, isAgg) ){ if( pParse->nErr ) goto select_end; /* This subquery can be absorbed into its parent. */ @@ -150486,7 +151543,7 @@ SQLITE_PRIVATE int sqlite3Select( #ifndef SQLITE_OMIT_COMPOUND_SELECT /* Handle compound SELECT statements using the separate multiSelect() - ** procedure. + ** procedure. tag-select-0300 */ if( p->pPrior ){ rc = multiSelect(pParse, p, pDest); @@ -150502,9 +151559,9 @@ SQLITE_PRIVATE int sqlite3Select( #endif /* Do the WHERE-clause constant propagation optimization if this is - ** a join. No need to speed time on this operation for non-join queries + ** a join. No need to spend time on this operation for non-join queries ** as the equivalent optimization will be handled by query planner in - ** sqlite3WhereBegin(). + ** sqlite3WhereBegin(). 
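** As an illustrative example (hypothetical tables):
**
**     SELECT * FROM t1, t2 WHERE t1.a=39 AND t2.b=t1.a;
**
** Constant propagation rewrites the second term as t2.b=39, giving the
** planner a constraint on t2 that no longer depends on the t1 loop.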
tag-select-0330 */ if( p->pWhere!=0 && p->pWhere->op==TK_AND @@ -150521,6 +151578,7 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x2000,pParse,p,("Constant propagation not helpful\n")); } + /* tag-select-0350 */ if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView) && countOfViewOptimization(pParse, p) ){ @@ -150528,20 +151586,26 @@ SQLITE_PRIVATE int sqlite3Select( pTabList = p->pSrc; } - /* For each term in the FROM clause, do two things: - ** (1) Authorized unreferenced tables - ** (2) Generate code for all sub-queries + /* Loop over all terms in the FROM clause and do two things for each term: + ** + ** (1) Authorize unreferenced tables + ** (2) Generate code for all sub-queries + ** + ** tag-select-0400 */ for(i=0; inSrc; i++){ SrcItem *pItem = &pTabList->a[i]; SrcItem *pPrior; SelectDest dest; + Subquery *pSubq; Select *pSub; #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) const char *zSavedAuthContext; #endif - /* Issue SQLITE_READ authorizations with a fake column name for any + /* Authorized unreferenced tables. tag-select-0410 + ** + ** Issue SQLITE_READ authorizations with a fake column name for any ** tables that are referenced but from which no values are extracted. ** Examples of where these kinds of null SQLITE_READ authorizations ** would occur: @@ -150558,17 +151622,28 @@ SQLITE_PRIVATE int sqlite3Select( ** string for the fake column name seems safer. */ if( pItem->colUsed==0 && pItem->zName!=0 ){ - sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", pItem->zDatabase); + const char *zDb; + if( pItem->fg.fixedSchema ){ + int iDb = sqlite3SchemaToIndex(pParse->db, pItem->u4.pSchema); + zDb = db->aDb[iDb].zDbSName; + }else if( pItem->fg.isSubquery ){ + zDb = 0; + }else{ + zDb = pItem->u4.zDatabase; + } + sqlite3AuthCheck(pParse, SQLITE_READ, pItem->zName, "", zDb); } #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* Generate code for all sub-queries in the FROM clause */ - pSub = pItem->pSelect; - if( pSub==0 || pItem->addrFillSub!=0 ) continue; + if( pItem->fg.isSubquery==0 ) continue; + pSubq = pItem->u4.pSubq; + assert( pSubq!=0 ); + pSub = pSubq->pSelect; /* The code for a subquery should only be generated once. */ - assert( pItem->addrFillSub==0 ); + if( pSubq->addrFillSub!=0 ) continue; /* Increment Parse.nHeight by the height of the largest expression ** tree referred to by this, the parent select. The child select @@ -150581,6 +151656,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Make copies of constant WHERE-clause terms in the outer query down ** inside the subquery. This can help the subquery to run more efficiently. + ** This is the "predicate push-down optimization". tag-select-0420 */ if( OptimizationEnabled(db, SQLITE_PushDown) && (pItem->fg.isCte==0 @@ -150594,13 +151670,14 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3TreeViewSelect(0, p, 0); } #endif - assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); + assert( pSubq->pSelect && (pSub->selFlags & SF_PushDown)!=0 ); }else{ - TREETRACE(0x4000,pParse,p,("WHERE-lcause push-down not possible\n")); + TREETRACE(0x4000,pParse,p,("WHERE-clause push-down not possible\n")); } /* Convert unused result columns of the subquery into simple NULL ** expressions, to avoid unneeded searching and computation. 
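** As an illustrative example (hypothetical tables): in
**
**     SELECT a FROM (SELECT a,b,c FROM t1 UNION ALL SELECT x,y,z FROM t2);
**
** the second and third result columns of the subquery are never
** referenced by the outer query, so their expressions can be replaced
** by NULL without changing the output.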
+ ** tag-select-0440 */ if( OptimizationEnabled(db, SQLITE_NullUnusedCols) && disableUnusedSubqueryResultColumns(pItem) @@ -150618,32 +151695,33 @@ SQLITE_PRIVATE int sqlite3Select( zSavedAuthContext = pParse->zAuthContext; pParse->zAuthContext = pItem->zName; - /* Generate code to implement the subquery + /* Generate byte-code to implement the subquery tag-select-0480 */ if( fromClauseTermCanBeCoroutine(pParse, pTabList, i, p->selFlags) ){ /* Implement a co-routine that will return a single row of the result - ** set on each invocation. + ** set on each invocation. tag-select-0482 */ int addrTop = sqlite3VdbeCurrentAddr(v)+1; - pItem->regReturn = ++pParse->nMem; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop); + pSubq->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, pSubq->regReturn, 0, addrTop); VdbeComment((v, "%!S", pItem)); - pItem->addrFillSub = addrTop; - sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn); + pSubq->addrFillSub = addrTop; + sqlite3SelectDestInit(&dest, SRT_Coroutine, pSubq->regReturn); ExplainQueryPlan((pParse, 1, "CO-ROUTINE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = pSub->nSelectRow; + pItem->pSTab->nRowLogEst = pSub->nSelectRow; pItem->fg.viaCoroutine = 1; - pItem->regResult = dest.iSdst; - sqlite3VdbeEndCoroutine(v, pItem->regReturn); + pSubq->regResult = dest.iSdst; + sqlite3VdbeEndCoroutine(v, pSubq->regReturn); + VdbeComment((v, "end %!S", pItem)); sqlite3VdbeJumpHere(v, addrTop-1); sqlite3ClearTempRegCache(pParse); }else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){ /* This is a CTE for which materialization code has already been ** generated. Invoke the subroutine to compute the materialization, - ** the make the pItem->iCursor be a copy of the ephemeral table that - ** holds the result of the materialization. */ + ** then make the pItem->iCursor be a copy of the ephemeral table that + ** holds the result of the materialization. tag-select-0484 */ CteUse *pCteUse = pItem->u2.pCteUse; sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e); if( pItem->iCursor!=pCteUse->iCur ){ @@ -150653,25 +151731,30 @@ SQLITE_PRIVATE int sqlite3Select( pSub->nSelectRow = pCteUse->nRowEst; }else if( (pPrior = isSelfJoinView(pTabList, pItem, 0, i))!=0 ){ /* This view has already been materialized by a prior entry in - ** this same FROM clause. Reuse it. */ - if( pPrior->addrFillSub ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pPrior->regReturn, pPrior->addrFillSub); + ** this same FROM clause. Reuse it. tag-select-0486 */ + Subquery *pPriorSubq; + assert( pPrior->fg.isSubquery ); + pPriorSubq = pPrior->u4.pSubq; + assert( pPriorSubq!=0 ); + if( pPriorSubq->addrFillSub ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pPriorSubq->regReturn, + pPriorSubq->addrFillSub); } sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor); - pSub->nSelectRow = pPrior->pSelect->nSelectRow; + pSub->nSelectRow = pPriorSubq->pSelect->nSelectRow; }else{ /* Materialize the view. If the view is not correlated, generate a ** subroutine to do the materialization so that subsequent uses of - ** the same view can reuse the materialization. */ + ** the same view can reuse the materialization. 
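** As an illustrative example (hypothetical view): in
**
**     SELECT * FROM v1 AS a JOIN v1 AS b ON a.x=b.y;
**
** the subroutine that fills the ephemeral table for the first v1 is
** generated once; the second reference invokes that subroutine (or just
** reopens the ephemeral table) instead of recomputing the view.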
tag-select-0488 */ int topAddr; int onceAddr = 0; #ifdef SQLITE_ENABLE_STMT_SCANSTATUS int addrExplain; #endif - pItem->regReturn = ++pParse->nMem; + pSubq->regReturn = ++pParse->nMem; topAddr = sqlite3VdbeAddOp0(v, OP_Goto); - pItem->addrFillSub = topAddr+1; + pSubq->addrFillSub = topAddr+1; pItem->fg.isMaterialized = 1; if( pItem->fg.isCorrelated==0 ){ /* If the subquery is not correlated and if we are not inside of @@ -150686,17 +151769,17 @@ SQLITE_PRIVATE int sqlite3Select( ExplainQueryPlan2(addrExplain, (pParse, 1, "MATERIALIZE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = pSub->nSelectRow; + pItem->pSTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); - sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1); + sqlite3VdbeAddOp2(v, OP_Return, pSubq->regReturn, topAddr+1); VdbeComment((v, "end %!S", pItem)); sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); sqlite3VdbeJumpHere(v, topAddr); sqlite3ClearTempRegCache(pParse); if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){ CteUse *pCteUse = pItem->u2.pCteUse; - pCteUse->addrM9e = pItem->addrFillSub; - pCteUse->regRtn = pItem->regReturn; + pCteUse->addrM9e = pSubq->addrFillSub; + pCteUse->regRtn = pSubq->regReturn; pCteUse->iCur = pItem->iCursor; pCteUse->nRowEst = pSub->nSelectRow; } @@ -150722,7 +151805,9 @@ SQLITE_PRIVATE int sqlite3Select( } #endif - /* If the query is DISTINCT with an ORDER BY but is not an aggregate, and + /* tag-select-0500 + ** + ** If the query is DISTINCT with an ORDER BY but is not an aggregate, and ** if the select-list is the same as the ORDER BY list, then this query ** can be rewritten as a GROUP BY. In other words, this: ** @@ -150739,12 +151824,18 @@ SQLITE_PRIVATE int sqlite3Select( */ if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0 + && OptimizationEnabled(db, SQLITE_GroupByOrder) #ifndef SQLITE_OMIT_WINDOWFUNC && p->pWin==0 #endif ){ p->selFlags &= ~SF_Distinct; pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0); + if( pGroupBy ){ + for(i=0; inExpr; i++){ + pGroupBy->a[i].u.x.iOrderByCol = i+1; + } + } p->selFlags |= SF_Aggregate; /* Notice that even thought SF_Distinct has been cleared from p->selFlags, ** the sDistinct.isTnct is still set. Hence, isTnct represents the @@ -150766,7 +151857,7 @@ SQLITE_PRIVATE int sqlite3Select( ** If that is the case, then the OP_OpenEphemeral instruction will be ** changed to an OP_Noop once we figure out that the sorting index is ** not needed. The sSort.addrSortIndex variable is used to facilitate - ** that change. + ** that change. tag-select-0600 */ if( sSort.pOrderBy ){ KeyInfo *pKeyInfo; @@ -150783,6 +151874,7 @@ SQLITE_PRIVATE int sqlite3Select( } /* If the output is destined for a temporary table, open that table. + ** tag-select-0630 */ if( pDest->eDest==SRT_EphemTab ){ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr); @@ -150800,7 +151892,7 @@ SQLITE_PRIVATE int sqlite3Select( } } - /* Set the limiter. + /* Set the limiter. tag-select-0650 */ iEnd = sqlite3VdbeMakeLabel(pParse); if( (p->selFlags & SF_FixedLimit)==0 ){ @@ -150812,7 +151904,7 @@ SQLITE_PRIVATE int sqlite3Select( sSort.sortFlags |= SORTFLAG_UseSorter; } - /* Open an ephemeral index to use for the distinct set. + /* Open an ephemeral index to use for the distinct set. 
tag-select-0680 */ if( p->selFlags & SF_Distinct ){ sDistinct.tabTnct = pParse->nTab++; @@ -150827,7 +151919,7 @@ SQLITE_PRIVATE int sqlite3Select( } if( !isAgg && pGroupBy==0 ){ - /* No aggregate functions and no GROUP BY clause */ + /* No aggregate functions and no GROUP BY clause. tag-select-0700 */ u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0) | (p->selFlags & SF_FixedLimit); #ifndef SQLITE_OMIT_WINDOWFUNC @@ -150900,8 +151992,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3WhereEnd(pWInfo); } }else{ - /* This case when there exist aggregate functions or a GROUP BY clause - ** or both */ + /* This case is for when there exist aggregate functions or a GROUP BY + ** clause or both. tag-select-0800 */ NameContext sNC; /* Name context for processing aggregate information */ int iAMem; /* First Mem address for storing current GROUP BY */ int iBMem; /* First Mem address for previous GROUP BY */ @@ -151020,7 +152112,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Processing for aggregates with GROUP BY is very different and - ** much more complex than aggregates without a GROUP BY. + ** much more complex than aggregates without a GROUP BY. tag-select-0810 */ if( pGroupBy ){ KeyInfo *pKeyInfo; /* Keying information for the group by clause */ @@ -151207,12 +152299,25 @@ SQLITE_PRIVATE int sqlite3Select( sortOut, sortPTab); } for(j=0; jnExpr; j++){ + int iOrderByCol = pGroupBy->a[j].u.x.iOrderByCol; + if( groupBySort ){ sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j); }else{ pAggInfo->directMode = 1; sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j); } + + if( iOrderByCol ){ + Expr *pX = p->pEList->a[iOrderByCol-1].pExpr; + Expr *pBase = sqlite3ExprSkipCollateAndLikely(pX); + if( ALWAYS(pBase!=0) + && pBase->op!=TK_AGG_COLUMN + && pBase->op!=TK_REGISTER + ){ + sqlite3ExprToRegister(pX, iAMem+j); + } + } } sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr, (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); @@ -151228,9 +152333,9 @@ SQLITE_PRIVATE int sqlite3Select( ** and resets the aggregate accumulator registers in preparation ** for the next GROUP BY batch. */ - sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); VdbeComment((v, "output one row")); + sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v); VdbeComment((v, "check abort flag")); sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); @@ -151304,9 +152409,12 @@ SQLITE_PRIVATE int sqlite3Select( } } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */ else { + /* Aggregate functions without GROUP BY. tag-select-0820 */ Table *pTab; if( (pTab = isSimpleCount(p, pAggInfo))!=0 ){ - /* If isSimpleCount() returns a pointer to a Table structure, then + /* tag-select-0821 + ** + ** If isSimpleCount() returns a pointer to a Table structure, then ** the SQL statement is of the form: ** ** SELECT count(*) FROM @@ -151365,6 +152473,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp1(v, OP_Close, iCsr); explainSimpleCount(pParse, pTab, pBest); }else{ + /* The general case of an aggregate query without GROUP BY + ** tag-select-0822 */ int regAcc = 0; /* "populate accumulators" flag */ ExprList *pDistinct = 0; u16 distFlag = 0; @@ -151453,7 +152563,7 @@ SQLITE_PRIVATE int sqlite3Select( } /* If there is an ORDER BY clause, then we need to sort the results - ** and send them to the callback one by one. + ** and send them to the callback one by one. 
tag-select-0900 */ if( sSort.pOrderBy ){ assert( p->pEList==pEList ); @@ -151476,6 +152586,7 @@ SQLITE_PRIVATE int sqlite3Select( assert( db->mallocFailed==0 || pParse->nErr!=0 ); sqlite3ExprListDelete(db, pMinMaxOrderBy); #ifdef SQLITE_DEBUG + /* Internal self-checks. tag-select-1000 */ if( pAggInfo && !db->mallocFailed ){ #if TREETRACE_ENABLED if( sqlite3TreeTrace & 0x20 ){ @@ -151865,8 +152976,10 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( ** name on pTableName if we are reparsing out of the schema table */ if( db->init.busy && iDb!=1 ){ - sqlite3DbFree(db, pTableName->a[0].zDatabase); - pTableName->a[0].zDatabase = 0; + assert( pTableName->a[0].fg.fixedSchema==0 ); + assert( pTableName->a[0].fg.isSubquery==0 ); + sqlite3DbFree(db, pTableName->a[0].u4.zDatabase); + pTableName->a[0].u4.zDatabase = 0; } /* If the trigger name was unqualified, and the table is a temp table, @@ -152344,7 +153457,8 @@ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr) } assert( pName->nSrc==1 ); - zDb = pName->a[0].zDatabase; + assert( pName->a[0].fg.fixedSchema==0 && pName->a[0].fg.isSubquery==0 ); + zDb = pName->a[0].u4.zDatabase; zName = pName->a[0].zName; assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) ); for(i=OMIT_TEMPDB; inDb; i++){ @@ -152581,7 +153695,9 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc( Schema *pSchema = pStep->pTrig->pSchema; pSrc->a[0].zName = zName; if( pSchema!=db->aDb[1].pSchema ){ - pSrc->a[0].pSchema = pSchema; + assert( pSrc->a[0].fg.fixedSchema || pSrc->a[0].u4.zDatabase==0 ); + pSrc->a[0].u4.pSchema = pSchema; + pSrc->a[0].fg.fixedSchema = 1; } if( pStep->pFrom ){ SrcList *pDup = sqlite3SrcListDup(db, pStep->pFrom, 0); @@ -152694,7 +153810,7 @@ static int sqlite3ReturningSubqueryCorrelated(Walker *pWalker, Select *pSelect){ pSrc = pSelect->pSrc; assert( pSrc!=0 ); for(i=0; inSrc; i++){ - if( pSrc->a[i].pTab==pWalker->u.pTab ){ + if( pSrc->a[i].pSTab==pWalker->u.pTab ){ testcase( pSelect->selFlags & SF_Correlated ); pSelect->selFlags |= SF_Correlated; pWalker->eCode = 1; @@ -152765,7 +153881,7 @@ static void codeReturningTrigger( sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0); sSelect.pSrc = &sFrom; sFrom.nSrc = 1; - sFrom.a[0].pTab = pTab; + sFrom.a[0].pSTab = pTab; sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */ sFrom.a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); @@ -153117,7 +154233,7 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( ** invocation is disallowed if (a) the sub-program is really a trigger, ** not a foreign key action, and (b) the flag to enable recursive triggers ** is clear. 
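** As an illustrative example: with
**
**     PRAGMA recursive_triggers=ON;
**
** an AFTER INSERT trigger on t1 that itself inserts into t1 may fire
** re-entrantly (subject to SQLITE_MAX_TRIGGER_DEPTH); with the pragma
** off, the recursive invocation is suppressed.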
*/ - sqlite3VdbeChangeP5(v, (u8)bRecursive); + sqlite3VdbeChangeP5(v, (u16)bRecursive); } } @@ -153476,7 +154592,7 @@ static void updateFromSelect( Expr *pLimit2 = 0; ExprList *pOrderBy2 = 0; sqlite3 *db = pParse->db; - Table *pTab = pTabList->a[0].pTab; + Table *pTab = pTabList->a[0].pSTab; SrcList *pSrc; Expr *pWhere2; int eDest; @@ -153500,8 +154616,8 @@ static void updateFromSelect( if( pSrc ){ assert( pSrc->a[0].fg.notCte ); pSrc->a[0].iCursor = -1; - pSrc->a[0].pTab->nTabRef--; - pSrc->a[0].pTab = 0; + pSrc->a[0].pSTab->nTabRef--; + pSrc->a[0].pSTab = 0; } if( pPk ){ for(i=0; inKeyCol; i++){ @@ -154749,7 +155865,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( int nClause = 0; /* Counter of ON CONFLICT clauses */ assert( pTabList->nSrc==1 ); - assert( pTabList->a[0].pTab!=0 ); + assert( pTabList->a[0].pSTab!=0 ); assert( pUpsert!=0 ); assert( pUpsert->pUpsertTarget!=0 ); @@ -154768,7 +155884,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( if( rc ) return rc; /* Check to see if the conflict target matches the rowid. */ - pTab = pTabList->a[0].pTab; + pTab = pTabList->a[0].pSTab; pTarget = pUpsert->pUpsertTarget; iCursor = pTabList->a[0].iCursor; if( HasRowid(pTab) @@ -155139,6 +156255,9 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( const char *zDbMain; /* Schema name of database to vacuum */ const char *zOut; /* Name of output file */ u32 pgflags = PAGER_SYNCHRONOUS_OFF; /* sync flags for output db */ + u64 iRandom; /* Random value used for zDbVacuum[] */ + char zDbVacuum[42]; /* Name of the ATTACH-ed database used for vacuum */ + if( !db->autoCommit ){ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); @@ -155179,27 +156298,29 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( pMain = db->aDb[iDb].pBt; isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain)); - /* Attach the temporary database as 'vacuum_db'. The synchronous pragma + /* Attach the temporary database as 'vacuum_XXXXXX'. The synchronous pragma ** can be set to 'off' for this file, as it is not recovered if a crash ** occurs anyway. The integrity of the database is maintained by a ** (possibly synchronous) transaction opened on the main database before ** sqlite3BtreeCopyFile() is called. ** ** An optimization would be to use a non-journaled pager. - ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but + ** (Later:) I tried setting "PRAGMA vacuum_XXXXXX.journal_mode=OFF" but ** that actually made the VACUUM run slower. Very little journalling ** actually occurs when doing a vacuum since the vacuum_db is initially ** empty. Only the journal header is written. Apparently it takes more ** time to parse and run the PRAGMA to turn journalling off than it does ** to write the journal header file. */ + sqlite3_randomness(sizeof(iRandom),&iRandom); + sqlite3_snprintf(sizeof(zDbVacuum), zDbVacuum, "vacuum_%016llx", iRandom); nDb = db->nDb; - rc = execSqlF(db, pzErrMsg, "ATTACH %Q AS vacuum_db", zOut); + rc = execSqlF(db, pzErrMsg, "ATTACH %Q AS %s", zOut, zDbVacuum); db->openFlags = saved_openFlags; if( rc!=SQLITE_OK ) goto end_of_vacuum; assert( (db->nDb-1)==nDb ); pDb = &db->aDb[nDb]; - assert( strcmp(pDb->zDbSName,"vacuum_db")==0 ); + assert( strcmp(pDb->zDbSName,zDbVacuum)==0 ); pTemp = pDb->pBt; if( pOut ){ sqlite3_file *id = sqlite3PagerFile(sqlite3BtreePager(pTemp)); @@ -155276,11 +156397,11 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( ** the contents to the temporary database. 
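** For an illustrative table "t1" in schema "main", the statement built
** and run by the query below is:
**
**     INSERT INTO vacuum_XXXXXXXXXXXXXXXX.'t1' SELECT*FROM"main".'t1'
**
** where vacuum_XXXXXXXXXXXXXXXX stands for the randomly-named database
** attached above.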
*/ rc = execSqlF(db, pzErrMsg, - "SELECT'INSERT INTO vacuum_db.'||quote(name)" + "SELECT'INSERT INTO %s.'||quote(name)" "||' SELECT*FROM\"%w\".'||quote(name)" - "FROM vacuum_db.sqlite_schema " + "FROM %s.sqlite_schema " "WHERE type='table'AND coalesce(rootpage,1)>0", - zDbMain + zDbVacuum, zDbMain, zDbVacuum ); assert( (db->mDbFlags & DBFLAG_Vacuum)!=0 ); db->mDbFlags &= ~DBFLAG_Vacuum; @@ -155292,11 +156413,11 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( ** from the schema table. */ rc = execSqlF(db, pzErrMsg, - "INSERT INTO vacuum_db.sqlite_schema" + "INSERT INTO %s.sqlite_schema" " SELECT*FROM \"%w\".sqlite_schema" " WHERE type IN('view','trigger')" " OR(type='table'AND rootpage=0)", - zDbMain + zDbVacuum, zDbMain ); if( rc ) goto end_of_vacuum; @@ -156223,7 +157344,9 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ z = (const unsigned char*)zCreateTable; for(i=0; aKeyword[i]; i++){ int tokenType = 0; - do{ z += sqlite3GetToken(z, &tokenType); }while( tokenType==TK_SPACE ); + do{ + z += sqlite3GetToken(z, &tokenType); + }while( tokenType==TK_SPACE || tokenType==TK_COMMENT ); if( tokenType!=aKeyword[i] ){ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "syntax error"); return SQLITE_ERROR; @@ -156260,6 +157383,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pNew = sParse.pNewTable; Index *pIdx; pTab->aCol = pNew->aCol; + assert( IsOrdinaryTable(pNew) ); sqlite3ExprListDelete(db, pNew->u.tab.pDfltList); pTab->nNVCol = pTab->nCol = pNew->nCol; pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid); @@ -156934,11 +158058,13 @@ struct WhereLoop { u16 nTop; /* Size of TOP vector */ u16 nDistinctCol; /* Index columns used to sort for DISTINCT */ Index *pIndex; /* Index used, or NULL */ + ExprList *pOrderBy; /* ORDER BY clause if this is really a subquery */ } btree; struct { /* Information for virtual tables */ int idxNum; /* Index number */ u32 needFree : 1; /* True if sqlite3_free(idxStr) is needed */ u32 bOmitOffset : 1; /* True to let virtual table handle offset */ + u32 bIdxNumHex : 1; /* Show idxNum as hex in EXPLAIN QUERY PLAN */ i8 isOrdered; /* True if satisfies ORDER BY */ u16 omitMask; /* Terms that may be omitted */ char *idxStr; /* Index identifier string */ @@ -156951,6 +158077,10 @@ struct WhereLoop { /**** whereLoopXfer() copies fields above ***********************/ # define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot) u16 nLSlot; /* Number of slots allocated for aLTerm[] */ +#ifdef WHERETRACE_ENABLED + LogEst rStarDelta; /* Cost delta due to star-schema heuristic. Not + ** initialized unless pWInfo->bStarUsed */ +#endif WhereTerm **aLTerm; /* WhereTerms used */ WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */ WhereTerm *aLTermSpace[3]; /* Initial aLTerm[] space */ @@ -156999,7 +158129,7 @@ struct WherePath { Bitmask revLoop; /* aLoop[]s that should be reversed for ORDER BY */ LogEst nRow; /* Estimated number of rows generated by this path */ LogEst rCost; /* Total cost of this path */ - LogEst rUnsorted; /* Total cost of this path ignoring sorting costs */ + LogEst rUnsort; /* Total cost of this path ignoring sorting costs */ i8 isOrdered; /* No. of ORDER BY terms satisfied. 
-1 for unknown */ WhereLoop **aLoop; /* Array of WhereLoop objects implementing this path */ }; @@ -157272,8 +158402,13 @@ struct WhereInfo { unsigned bDeferredSeek :1; /* Uses OP_DeferredSeek */ unsigned untestedTerms :1; /* Not all WHERE terms resolved by outer loop */ unsigned bOrderedInnerLoop:1;/* True if only the inner-most loop is ordered */ - unsigned sorted :1; /* True if really sorted (not just grouped) */ + unsigned sorted :1; /* True if really sorted (not just grouped) */ + unsigned bStarDone :1; /* True if check for star-query is complete */ + unsigned bStarUsed :1; /* True if star-query heuristic is used */ LogEst nRowOut; /* Estimated number of output rows */ +#ifdef WHERETRACE_ENABLED + LogEst rTotalCost; /* Total cost of the solution */ +#endif int iTop; /* The very beginning of the WHERE loop */ int iEndWhere; /* End of the WHERE clause itself */ WhereLoop *pLoops; /* List of all WhereLoop objects */ @@ -157319,9 +158454,17 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( const WhereInfo *pWInfo, /* WHERE clause */ const WhereLevel *pLevel /* Bloom filter on this level */ ); +SQLITE_PRIVATE void sqlite3WhereAddExplainText( + Parse *pParse, /* Parse context */ + int addr, + SrcList *pTabList, /* Table list this loop refers to */ + WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ + u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ +); #else # define sqlite3WhereExplainOneScan(u,v,w,x) 0 # define sqlite3WhereExplainBloomFilter(u,v,w) 0 +# define sqlite3WhereAddExplainText(u,v,w,x,y) #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3WhereAddScanStatus( @@ -157424,7 +158567,8 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*); #define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */ #define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */ #define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */ - /* 0x02000000 -- available for reuse */ +#define WHERE_COROUTINE 0x02000000 /* Implemented by co-routine. + ** NB: False-negatives are possible */ #define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */ #endif /* !defined(SQLITE_WHEREINT_H) */ @@ -157522,38 +158666,38 @@ static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){ } /* -** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG -** was defined at compile-time. If it is not a no-op, a single OP_Explain -** opcode is added to the output to describe the table scan strategy in pLevel. -** -** If an OP_Explain opcode is added to the VM, its address is returned. -** Otherwise, if no OP_Explain is coded, zero is returned. +** This function sets the P4 value of an existing OP_Explain opcode to +** text describing the loop in pLevel. If the OP_Explain opcode already has +** a P4 value, it is freed before it is overwritten. 
*/ -SQLITE_PRIVATE int sqlite3WhereExplainOneScan( +SQLITE_PRIVATE void sqlite3WhereAddExplainText( Parse *pParse, /* Parse context */ + int addr, /* Address of OP_Explain opcode */ SrcList *pTabList, /* Table list this loop refers to */ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ){ - int ret = 0; #if !defined(SQLITE_DEBUG) if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { + VdbeOp *pOp = sqlite3VdbeGetOp(pParse->pVdbe, addr); + SrcItem *pItem = &pTabList->a[pLevel->iFrom]; - Vdbe *v = pParse->pVdbe; /* VM being constructed */ sqlite3 *db = pParse->db; /* Database handle */ int isSearch; /* True for a SEARCH. False for SCAN. */ WhereLoop *pLoop; /* The controlling WhereLoop object */ u32 flags; /* Flags that describe this loop */ +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN) char *zMsg; /* Text to add to EQP output */ +#endif StrAccum str; /* EQP output string */ char zBuf[100]; /* Initial space for EQP output string */ + if( db->mallocFailed ) return; + pLoop = pLevel->pWLoop; flags = pLoop->wsFlags; - if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_OR_SUBCLAUSE) ) return 0; isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0)) @@ -157569,7 +158713,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( assert( pLoop->u.btree.pIndex!=0 ); pIdx = pLoop->u.btree.pIndex; assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) ); - if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){ + if( !HasRowid(pItem->pSTab) && IsPrimaryKeyIndex(pIdx) ){ if( isSearch ){ zFmt = "PRIMARY KEY"; } @@ -157577,7 +158721,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( zFmt = "AUTOMATIC PARTIAL COVERING INDEX"; }else if( flags & WHERE_AUTO_INDEX ){ zFmt = "AUTOMATIC COVERING INDEX"; - }else if( flags & WHERE_IDX_ONLY ){ + }else if( flags & (WHERE_IDX_ONLY|WHERE_EXPRIDX) ){ zFmt = "COVERING INDEX %s"; }else{ zFmt = "INDEX %s"; @@ -157612,7 +158756,9 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( (flags & WHERE_VIRTUALTABLE)!=0 ){ - sqlite3_str_appendf(&str, " VIRTUAL TABLE INDEX %d:%s", + sqlite3_str_appendall(&str, " VIRTUAL TABLE INDEX "); + sqlite3_str_appendf(&str, + pLoop->u.vtab.bIdxNumHex ? "0x%x:%s" : "%d:%s", pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr); } #endif @@ -157627,10 +158773,50 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( sqlite3_str_append(&str, " (~1 row)", 9); } #endif +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_EXPLAIN) zMsg = sqlite3StrAccumFinish(&str); sqlite3ExplainBreakpoint("",zMsg); - ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v), - pParse->addrExplain, 0, zMsg,P4_DYNAMIC); +#endif + + assert( pOp->opcode==OP_Explain ); + assert( pOp->p4type==P4_DYNAMIC || pOp->p4.z==0 ); + sqlite3DbFree(db, pOp->p4.z); + pOp->p4type = P4_DYNAMIC; + pOp->p4.z = sqlite3StrAccumFinish(&str); + } +} + + +/* +** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN +** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG +** was defined at compile-time. If it is not a no-op, a single OP_Explain +** opcode is added to the output to describe the table scan strategy in pLevel. +** +** If an OP_Explain opcode is added to the VM, its address is returned. +** Otherwise, if no OP_Explain is coded, zero is returned. 
+*/ +SQLITE_PRIVATE int sqlite3WhereExplainOneScan( + Parse *pParse, /* Parse context */ + SrcList *pTabList, /* Table list this loop refers to */ + WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ + u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ +){ + int ret = 0; +#if !defined(SQLITE_DEBUG) + if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) +#endif + { + if( (pLevel->pWLoop->wsFlags & WHERE_MULTI_OR)==0 + && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 + ){ + Vdbe *v = pParse->pVdbe; + int addr = sqlite3VdbeCurrentAddr(v); + ret = sqlite3VdbeAddOp3( + v, OP_Explain, addr, pParse->addrExplain, pLevel->pWLoop->rRun + ); + sqlite3WhereAddExplainText(pParse, addr, pTabList, pLevel, wctrlFlags); + } } return ret; } @@ -157665,7 +158851,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( sqlite3_str_appendf(&str, "BLOOM FILTER ON %S (", pItem); pLoop = pLevel->pWLoop; if( pLoop->wsFlags & WHERE_IPK ){ - const Table *pTab = pItem->pTab; + const Table *pTab = pItem->pSTab; if( pTab->iPKey>=0 ){ sqlite3_str_appendf(&str, "%s=?", pTab->aCol[pTab->iPKey].zCnName); }else{ @@ -157728,8 +158914,11 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus( sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur); } }else{ - int addr = pSrclist->a[pLvl->iFrom].addrFillSub; - VdbeOp *pOp = sqlite3VdbeGetOp(v, addr-1); + int addr; + VdbeOp *pOp; + assert( pSrclist->a[pLvl->iFrom].fg.isSubquery ); + addr = pSrclist->a[pLvl->iFrom].u4.pSubq->addrFillSub; + pOp = sqlite3VdbeGetOp(v, addr-1); assert( sqlite3VdbeDb(v)->mallocFailed || pOp->opcode==OP_InitCoroutine ); assert( sqlite3VdbeDb(v)->mallocFailed || pOp->p2>addr ); sqlite3VdbeScanStatusRange(v, addrExplain, addr, pOp->p2-1); @@ -157872,6 +159061,39 @@ static void updateRangeAffinityStr( } } +/* +** The pOrderBy->a[].u.x.iOrderByCol values might be incorrect because +** columns might have been rearranged in the result set. This routine +** fixes them up. +** +** pEList is the new result set. The pEList->a[].u.x.iOrderByCol values +** contain the *old* locations of each expression. This is a temporary +** use of u.x.iOrderByCol, not its intended use. The caller must reset +** u.x.iOrderByCol back to zero for all entries in pEList before the +** caller returns. +** +** This routine changes pOrderBy->a[].u.x.iOrderByCol values from +** pEList->a[N].u.x.iOrderByCol into N+1. (The "+1" is because of the 1-based +** indexing used by iOrderByCol.) Or if no match, iOrderByCol is set to zero. +*/ +static void adjustOrderByCol(ExprList *pOrderBy, ExprList *pEList){ + int i, j; + if( pOrderBy==0 ) return; + for(i=0; inExpr; i++){ + int t = pOrderBy->a[i].u.x.iOrderByCol; + if( t==0 ) continue; + for(j=0; jnExpr; j++){ + if( pEList->a[j].u.x.iOrderByCol==t ){ + pOrderBy->a[i].u.x.iOrderByCol = j+1; + break; + } + } + if( j>=pEList->nExpr ){ + pOrderBy->a[i].u.x.iOrderByCol = 0; + } + } +} + /* ** pX is an expression of the form: (vector) IN (SELECT ...) 
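** As an illustrative example (hypothetical index and tables): for
**
**     (a,b) IN (SELECT x,y FROM t2)
**
** where the chosen index can only make use of "a", this routine rewrites
** the term so that the subquery returns just the usable columns, roughly
** a IN (SELECT x FROM t2), and the changes below keep the iOrderByCol
** bookkeeping of the rewritten result set consistent.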
@@ -157935,6 +159157,7 @@ static Expr *removeUnindexableInClauseTerms(
    if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */
    pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr);
    pOrigRhs->a[iField].pExpr = 0;
+    if( pRhs ) pRhs->a[pRhs->nExpr-1].u.x.iOrderByCol = iField+1;
    if( pOrigLhs ){
      assert( pOrigLhs->a[iField].pExpr!=0 );
      pLhs = sqlite3ExprListAppend(pParse,pLhs,pOrigLhs->a[iField].pExpr);
@@ -157948,6 +159171,7 @@ static Expr *removeUnindexableInClauseTerms(
      pNew->pLeft->x.pList = pLhs;
    }
    pSelect->pEList = pRhs;
+    pSelect->selId = ++pParse->nSelect;  /* Req'd for SubrtnSig validity */
    if( pLhs && pLhs->nExpr==1 ){
      /* Take care here not to generate a TK_VECTOR containing only a
      ** single value. Since the parser never creates such a vector, some
@@ -157957,18 +159181,16 @@ static Expr *removeUnindexableInClauseTerms(
      sqlite3ExprDelete(db, pNew->pLeft);
      pNew->pLeft = p;
    }
-    if( pSelect->pOrderBy ){
-      /* If the SELECT statement has an ORDER BY clause, zero the
-      ** iOrderByCol variables. These are set to non-zero when an
-      ** ORDER BY term exactly matches one of the terms of the
-      ** result-set. Since the result-set of the SELECT statement may
-      ** have been modified or reordered, these variables are no longer
-      ** set correctly.  Since setting them is just an optimization,
-      ** it's easiest just to zero them here.  */
-      ExprList *pOrderBy = pSelect->pOrderBy;
-      for(i=0; i<pOrderBy->nExpr; i++){
-        pOrderBy->a[i].u.x.iOrderByCol = 0;
-      }
+
+    /* If either the ORDER BY clause or the GROUP BY clause contains
+    ** references to result-set columns, those references might now be
+    ** obsolete.  So fix them up.
+    */
+    assert( pRhs!=0 || db->mallocFailed );
+    if( pRhs ){
+      adjustOrderByCol(pSelect->pOrderBy, pRhs);
+      adjustOrderByCol(pSelect->pGroupBy, pRhs);
+      for(i=0; i<pRhs->nExpr; i++) pRhs->a[i].u.x.iOrderByCol = 0;
    }

#if 0
@@ -157983,6 +159205,147 @@ static Expr *removeUnindexableInClauseTerms(
}


+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Generate code for a single X IN (....) term of the WHERE clause.
+**
+** This is a special-case of codeEqualityTerm() that works for IN operators
+** only.  It is broken out into a subroutine because this case is
+** uncommon and by splitting it off into a subroutine, the common case
+** runs faster.
+**
+** The current value for the constraint is left in register iTarget.
+** This routine sets up a loop that will iterate over all values of X.
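+**
+** For example, given hypothetical tables t1(a,b) and t2(x,y), a term such
+** as (a,b) IN (SELECT x,y FROM t2) lands here: sqlite3FindInIndex()
+** materializes or locates the RHS behind cursor iTab, the OP_Rewind or
+** OP_Last coded below positions that cursor, and each pass of the loop
+** copies one RHS row into consecutive registers beginning at iTarget.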
+*/
+static SQLITE_NOINLINE void codeINTerm(
+  Parse *pParse,      /* The parsing context */
+  WhereTerm *pTerm,   /* The term of the WHERE clause to be coded */
+  WhereLevel *pLevel, /* The level of the FROM clause we are working on */
+  int iEq,            /* Index of the equality term within this level */
+  int bRev,           /* True for reverse-order IN operations */
+  int iTarget         /* Attempt to leave results in this register */
+){
+  Expr *pX = pTerm->pExpr;
+  int eType = IN_INDEX_NOOP;
+  int iTab;
+  struct InLoop *pIn;
+  WhereLoop *pLoop = pLevel->pWLoop;
+  Vdbe *v = pParse->pVdbe;
+  int i;
+  int nEq = 0;
+  int *aiMap = 0;
+
+  if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0
+   && pLoop->u.btree.pIndex!=0
+   && pLoop->u.btree.pIndex->aSortOrder[iEq]
+  ){
+    testcase( iEq==0 );
+    testcase( bRev );
+    bRev = !bRev;
+  }
+  assert( pX->op==TK_IN );
+
+  for(i=0; i<iEq; i++){
+    if( pLoop->aLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){
+      disableTerm(pLevel, pTerm);
+      return;
+    }
+  }
+  for(i=iEq; i<pLoop->nLTerm; i++){
+    assert( pLoop->aLTerm[i]!=0 );
+    if( pLoop->aLTerm[i]->pExpr==pX ) nEq++;
+  }
+
+  iTab = 0;
+  if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){
+    eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab);
+  }else{
+    Expr *pExpr = pTerm->pExpr;
+    if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){
+      sqlite3 *db = pParse->db;
+      pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX);
+      if( !db->mallocFailed ){
+        aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq);
+        eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab);
+        pExpr->iTable = iTab;
+      }
+      sqlite3ExprDelete(db, pX);
+    }else{
+      int n = sqlite3ExprVectorSize(pX->pLeft);
+      aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n));
+      eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab);
+    }
+    pX = pExpr;
+  }
+
+  if( eType==IN_INDEX_INDEX_DESC ){
+    testcase( bRev );
+    bRev = !bRev;
+  }
+  sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0);
+  VdbeCoverageIf(v, bRev);
+  VdbeCoverageIf(v, !bRev);
+
+  assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 );
+  pLoop->wsFlags |= WHERE_IN_ABLE;
+  if( pLevel->u.in.nIn==0 ){
+    pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse);
+  }
+  if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){
+    pLoop->wsFlags |= WHERE_IN_EARLYOUT;
+  }
+
+  i = pLevel->u.in.nIn;
+  pLevel->u.in.nIn += nEq;
+  pLevel->u.in.aInLoop =
+     sqlite3WhereRealloc(pTerm->pWC->pWInfo,
+                         pLevel->u.in.aInLoop,
+                         sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
+  pIn = pLevel->u.in.aInLoop;
+  if( pIn ){
+    int iMap = 0;                 /* Index in aiMap[] */
+    pIn += i;
+    for(i=iEq; i<pLoop->nLTerm; i++){
+      if( pLoop->aLTerm[i]->pExpr==pX ){
+        int iOut = iTarget + i - iEq;
+        if( eType==IN_INDEX_ROWID ){
+          pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut);
+        }else{
+          int iCol = aiMap ? aiMap[iMap++] : 0;
+          pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut);
+        }
+        sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v);
+        if( i==iEq ){
+          pIn->iCur = iTab;
+          pIn->eEndLoopOp = bRev ?
OP_Prev : OP_Next; + if( iEq>0 ){ + pIn->iBase = iTarget - i; + pIn->nPrefix = i; + }else{ + pIn->nPrefix = 0; + } + }else{ + pIn->eEndLoopOp = OP_Noop; + } + pIn++; + } + } + testcase( iEq>0 + && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 + && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ); + if( iEq>0 + && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0 + ){ + sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq); + } + }else{ + pLevel->u.in.nIn = 0; + } + sqlite3DbFree(pParse->db, aiMap); +} +#endif + + /* ** Generate code for a single equality term of the WHERE clause. An equality ** term can be either X=expr or X IN (...). pTerm is the term to be @@ -158007,7 +159370,6 @@ static int codeEqualityTerm( int iTarget /* Attempt to leave results in this register */ ){ Expr *pX = pTerm->pExpr; - Vdbe *v = pParse->pVdbe; int iReg; /* Register holding results */ assert( pLevel->pWLoop->aLTerm[iEq]==pTerm ); @@ -158016,125 +159378,12 @@ static int codeEqualityTerm( iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget); }else if( pX->op==TK_ISNULL ){ iReg = iTarget; - sqlite3VdbeAddOp2(v, OP_Null, 0, iReg); + sqlite3VdbeAddOp2(pParse->pVdbe, OP_Null, 0, iReg); #ifndef SQLITE_OMIT_SUBQUERY }else{ - int eType = IN_INDEX_NOOP; - int iTab; - struct InLoop *pIn; - WhereLoop *pLoop = pLevel->pWLoop; - int i; - int nEq = 0; - int *aiMap = 0; - - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 - && pLoop->u.btree.pIndex!=0 - && pLoop->u.btree.pIndex->aSortOrder[iEq] - ){ - testcase( iEq==0 ); - testcase( bRev ); - bRev = !bRev; - } assert( pX->op==TK_IN ); iReg = iTarget; - - for(i=0; iaLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){ - disableTerm(pLevel, pTerm); - return iTarget; - } - } - for(i=iEq;inLTerm; i++){ - assert( pLoop->aLTerm[i]!=0 ); - if( pLoop->aLTerm[i]->pExpr==pX ) nEq++; - } - - iTab = 0; - if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); - }else{ - Expr *pExpr = pTerm->pExpr; - if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){ - sqlite3 *db = pParse->db; - pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); - if( !db->mallocFailed ){ - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab); - pExpr->iTable = iTab; - } - sqlite3ExprDelete(db, pX); - }else{ - int n = sqlite3ExprVectorSize(pX->pLeft); - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n)); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab); - } - pX = pExpr; - } - - if( eType==IN_INDEX_INDEX_DESC ){ - testcase( bRev ); - bRev = !bRev; - } - sqlite3VdbeAddOp2(v, bRev ? 
OP_Last : OP_Rewind, iTab, 0); - VdbeCoverageIf(v, bRev); - VdbeCoverageIf(v, !bRev); - - assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); - pLoop->wsFlags |= WHERE_IN_ABLE; - if( pLevel->u.in.nIn==0 ){ - pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); - } - if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){ - pLoop->wsFlags |= WHERE_IN_EARLYOUT; - } - - i = pLevel->u.in.nIn; - pLevel->u.in.nIn += nEq; - pLevel->u.in.aInLoop = - sqlite3WhereRealloc(pTerm->pWC->pWInfo, - pLevel->u.in.aInLoop, - sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); - pIn = pLevel->u.in.aInLoop; - if( pIn ){ - int iMap = 0; /* Index in aiMap[] */ - pIn += i; - for(i=iEq;inLTerm; i++){ - if( pLoop->aLTerm[i]->pExpr==pX ){ - int iOut = iReg + i - iEq; - if( eType==IN_INDEX_ROWID ){ - pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut); - }else{ - int iCol = aiMap ? aiMap[iMap++] : 0; - pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut); - } - sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v); - if( i==iEq ){ - pIn->iCur = iTab; - pIn->eEndLoopOp = bRev ? OP_Prev : OP_Next; - if( iEq>0 ){ - pIn->iBase = iReg - i; - pIn->nPrefix = i; - }else{ - pIn->nPrefix = 0; - } - }else{ - pIn->eEndLoopOp = OP_Noop; - } - pIn++; - } - } - testcase( iEq>0 - && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 - && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ); - if( iEq>0 - && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0 - ){ - sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq); - } - }else{ - pLevel->u.in.nIn = 0; - } - sqlite3DbFree(pParse->db, aiMap); + codeINTerm(pParse, pTerm, pLevel, iEq, bRev, iTarget); #endif } @@ -158806,7 +160055,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( iCur = pTabItem->iCursor; pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); bRev = (pWInfo->revMask>>iLevel)&1; - VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); + VdbeModuleComment((v, "Begin WHERE-loop%d: %s", + iLevel, pTabItem->pSTab->zName)); #if WHERETRACE_ENABLED /* 0x4001 */ if( sqlite3WhereTrace & 0x1 ){ sqlite3DebugPrintf("Coding level %d of %d: notReady=%llx iFrom=%d\n", @@ -158861,11 +160111,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( /* Special case of a FROM clause subquery implemented as a co-routine */ if( pTabItem->fg.viaCoroutine ){ - int regYield = pTabItem->regReturn; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); + int regYield; + Subquery *pSubq; + assert( pTabItem->fg.isSubquery && pTabItem->u4.pSubq!=0 ); + pSubq = pTabItem->u4.pSubq; + regYield = pSubq->regReturn; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSubq->addrFillSub); pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pTabItem->pTab->zName)); + VdbeComment((v, "next row of %s", pTabItem->pSTab->zName)); pLevel->op = OP_Goto; }else @@ -159594,7 +160848,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int untestedTerms = 0; /* Some terms not completely tested */ int ii; /* Loop counter */ Expr *pAndExpr = 0; /* An ".. AND (...)" expression */ - Table *pTab = pTabItem->pTab; + Table *pTab = pTabItem->pSTab; pTerm = pLoop->aLTerm[0]; assert( pTerm!=0 ); @@ -160053,7 +161307,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** least once. This is accomplished by storing the PK for the row in ** both the iMatch index and the regBloom Bloom filter. 
*/
-      pTab = pWInfo->pTabList->a[pLevel->iFrom].pTab;
+      pTab = pWInfo->pTabList->a[pLevel->iFrom].pSTab;
      if( HasRowid(pTab) ){
        r = sqlite3GetTempRange(pParse, 2);
        sqlite3ExprCodeGetColumnOfTable(v, pTab, pLevel->iTabCur, -1, r+1);
@@ -160160,7 +161414,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop(
  Bitmask mAll = 0;
  int k;

-  ExplainQueryPlan((pParse, 1, "RIGHT-JOIN %s", pTabItem->pTab->zName));
+  ExplainQueryPlan((pParse, 1, "RIGHT-JOIN %s", pTabItem->pSTab->zName));
  sqlite3VdbeNoJumpsOutsideSubrtn(v, pRJ->addrSubrtn, pRJ->endSubrtn,
                                  pRJ->regReturn);
  for(k=0; k<iLevel; k++){
    SrcItem *pRight = &pWInfo->pTabList->a[pWInfo->a[k].iFrom];
    mAll |= pWInfo->a[k].pWLoop->maskSelf;
    if( pRight->fg.viaCoroutine ){
+      Subquery *pSubq;
+      assert( pRight->fg.isSubquery && pRight->u4.pSubq!=0 );
+      pSubq = pRight->u4.pSubq;
+      assert( pSubq->pSelect!=0 && pSubq->pSelect->pEList!=0 );
      sqlite3VdbeAddOp3(
-          v, OP_Null, 0, pRight->regResult,
-          pRight->regResult + pRight->pSelect->pEList->nExpr-1
+          v, OP_Null, 0, pSubq->regResult,
+          pSubq->regResult + pSubq->pSelect->pEList->nExpr-1
      );
    }
    sqlite3VdbeAddOp1(v, OP_NullRow, pWInfo->a[k].iTabCur);
@@ -160210,7 +161468,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop(
      int nPk;
      int jmp;
      int addrCont = sqlite3WhereContinueLabel(pSubWInfo);
-      Table *pTab = pTabItem->pTab;
+      Table *pTab = pTabItem->pSTab;
      if( HasRowid(pTab) ){
        sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, -1, r);
        nPk = 1;
@@ -160343,7 +161601,12 @@ static int allowedOp(int op){
  assert( TK_LT>TK_EQ && TK_LT<TK_GE );
  assert( TK_LE>TK_EQ && TK_LE<TK_GE );
  assert( TK_GE==TK_EQ+4 );
-  return op==TK_IN || (op>=TK_EQ && op<=TK_GE) || op==TK_ISNULL || op==TK_IS;
+  assert( TK_IN<TK_EQ );
+  assert( TK_ISNULL<TK_EQ );
+  assert( TK_IS<TK_EQ );
+  if( op>TK_GE ) return 0;
+  if( op>=TK_EQ ) return 1;
+  return op==TK_IN || op==TK_ISNULL || op==TK_IS;
}

/*
@@ -160376,15 +161639,16 @@ static u16 exprCommute(Parse *pParse, Expr *pExpr){
static u16 operatorMask(int op){
  u16 c;
  assert( allowedOp(op) );
-  if( op==TK_IN ){
+  if( op>=TK_EQ ){
+    assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff );
+    c = (u16)(WO_EQ<<(op-TK_EQ));
+  }else if( op==TK_IN ){
    c = WO_IN;
  }else if( op==TK_ISNULL ){
    c = WO_ISNULL;
-  }else if( op==TK_IS ){
-    c = WO_IS;
  }else{
-    assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff );
-    c = (u16)(WO_EQ<<(op-TK_EQ));
+    assert( op==TK_IS );
+    c = WO_IS;
  }
  assert( op!=TK_ISNULL || c==WO_ISNULL );
  assert( op!=TK_IN || c==WO_IN );
@@ -160455,12 +161719,28 @@ static int isLikeOrGlob(
      z = (u8*)pRight->u.zToken;
    }
    if( z ){
-
-      /* Count the number of prefix characters prior to the first wildcard */
+      /* Count the number of prefix bytes prior to the first wildcard,
+      ** U+fffd character, or malformed utf-8.  If the underlying database
+      ** has a UTF16LE encoding, then only consider ASCII characters.  Note that
+      ** the encoding of z[] is UTF8 - we are dealing with only UTF8 here in this
+      ** code, but the database engine itself might be processing content using a
+      ** different encoding. */
      cnt = 0;
      while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){
        cnt++;
-        if( c==wc[3] && z[cnt]!=0 ) cnt++;
+        if( c==wc[3] && z[cnt]>0 && z[cnt]<0x80 ){
+          cnt++;
+        }else if( c>=0x80 ){
+          const u8 *z2 = z+cnt-1;
+          if( c==0xff || sqlite3Utf8Read(&z2)==0xfffd  /* bad utf-8 */
+           || ENC(db)==SQLITE_UTF16LE
+          ){
+            cnt--;
+            break;
+          }else{
+            cnt = (int)(z2-z);
+          }
+        }
      }

      /* The optimization is possible only if (1) the pattern does not begin
@@ -160471,11 +161751,11 @@ static int isLikeOrGlob(
      ** range search. The third is because the caller assumes that the pattern
      ** consists of at least one character after all escapes have been
      ** removed.
*/ - if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && 255!=(u8)z[cnt-1] ){ + if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && ALWAYS(255!=(u8)z[cnt-1]) ){ Expr *pPrefix; /* A "complete" match if the pattern ends with "*" or "%" */ - *pisComplete = c==wc[0] && z[cnt+1]==0; + *pisComplete = c==wc[0] && z[cnt+1]==0 && ENC(db)!=SQLITE_UTF16LE; /* Get the pattern prefix. Remove all escapes from the prefix. */ pPrefix = sqlite3Expr(db, TK_STRING, (char*)z); @@ -160671,6 +161951,13 @@ static int isAuxiliaryVtabOperator( } } } + }else if( pExpr->op>=TK_EQ ){ + /* Comparison operators are a common case. Save a few comparisons for + ** that common case by terminating early. */ + assert( TK_NE < TK_EQ ); + assert( TK_ISNOT < TK_EQ ); + assert( TK_NOTNULL < TK_EQ ); + return 0; }else if( pExpr->op==TK_NE || pExpr->op==TK_ISNOT || pExpr->op==TK_NOTNULL ){ int res = 0; Expr *pLeft = pExpr->pLeft; @@ -161187,7 +162474,9 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){ if( ALWAYS(pSrc!=0) ){ int i; for(i=0; inSrc; i++){ - mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect); + if( pSrc->a[i].fg.isSubquery ){ + mask |= exprSelectUsage(pMaskSet, pSrc->a[i].u4.pSubq->pSelect); + } if( pSrc->a[i].fg.isUsing==0 ){ mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].u3.pOn); } @@ -161225,7 +162514,7 @@ static SQLITE_NOINLINE int exprMightBeIndexed2( int iCur; do{ iCur = pFrom->a[j].iCursor; - for(pIdx=pFrom->a[j].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + for(pIdx=pFrom->a[j].pSTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->aColExpr==0 ) continue; for(i=0; inKeyCol; i++){ if( pIdx->aiColumn[i]!=XN_EXPR ) continue; @@ -161269,7 +162558,7 @@ static int exprMightBeIndexed( for(i=0; inSrc; i++){ Index *pIdx; - for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + for(pIdx=pFrom->a[i].pSTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->aColExpr ){ return exprMightBeIndexed2(pFrom,aiCurCol,pExpr,i); } @@ -161597,9 +162886,8 @@ static void exprAnalyze( } if( !db->mallocFailed ){ - u8 c, *pC; /* Last character before the first wildcard */ + u8 *pC; /* Last character before the first wildcard */ pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1]; - c = *pC; if( noCase ){ /* The point is to increment the last character before the first ** wildcard. But if we increment '@', that will push it into the @@ -161607,10 +162895,17 @@ static void exprAnalyze( ** inequality. To avoid this, make sure to also run the full ** LIKE on all candidate expressions by clearing the isComplete flag */ - if( c=='A'-1 ) isComplete = 0; - c = sqlite3UpperToLower[c]; + if( *pC=='A'-1 ) isComplete = 0; + *pC = sqlite3UpperToLower[*pC]; } - *pC = c + 1; + + /* Increment the value of the last utf8 character in the prefix. */ + while( *pC==0xBF && pC>(u8*)pStr2->u.zToken ){ + *pC = 0x80; + pC--; + } + assert( *pC!=0xFF ); /* isLikeOrGlob() guarantees this */ + (*pC)++; } zCollSeqName = noCase ? 
"NOCASE" : sqlite3StrBINARY; pNewExpr1 = sqlite3ExprDup(db, pLeft, 0); @@ -161812,7 +163107,7 @@ static void whereAddLimitExpr( Expr *pNew; int iVal = 0; - if( sqlite3ExprIsInteger(pExpr, &iVal) && iVal>=0 ){ + if( sqlite3ExprIsInteger(pExpr, &iVal, pParse) && iVal>=0 ){ Expr *pVal = sqlite3Expr(db, TK_INTEGER, 0); if( pVal==0 ) return; ExprSetProperty(pVal, EP_IntValue); @@ -161857,7 +163152,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec assert( p!=0 && p->pLimit!=0 ); /* 1 -- checked by caller */ if( p->pGroupBy==0 && (p->selFlags & (SF_Distinct|SF_Aggregate))==0 /* 2 */ - && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pTab)) /* 3 */ + && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pSTab)) /* 3 */ ){ ExprList *pOrderBy = p->pOrderBy; int iCsr = p->pSrc->a[0].iCursor; @@ -162078,7 +163373,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( Expr *pColRef; Expr *pTerm; if( pItem->fg.isTabFunc==0 ) return; - pTab = pItem->pTab; + pTab = pItem->pSTab; assert( pTab!=0 ); pArgs = pItem->u1.pFuncArg; if( pArgs==0 ) return; @@ -162762,7 +164057,7 @@ static int isDistinctRedundant( ** clause is redundant. */ if( pTabList->nSrc!=1 ) return 0; iBase = pTabList->a[0].iCursor; - pTab = pTabList->a[0].pTab; + pTab = pTabList->a[0].pSTab; /* If any of the expressions is an IPK column on table iBase, then return ** true. Note: The (p->iTable==iBase) part of this test may be false if the @@ -162837,6 +164132,12 @@ static void translateColumnToCopy( VdbeOp *pOp = sqlite3VdbeGetOp(v, iStart); int iEnd = sqlite3VdbeCurrentAddr(v); if( pParse->db->mallocFailed ) return; +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("CHECKING for column-to-copy on cursor %d for %d..%d\n", + iTabCur, iStart, iEnd); + } +#endif for(; iStartp1!=iTabCur ) continue; if( pOp->opcode==OP_Column ){ @@ -162951,13 +164252,52 @@ static int constraintCompatibleWithOuterJoin( return 0; } if( (pSrc->fg.jointype & (JT_LEFT|JT_RIGHT))!=0 - && ExprHasProperty(pTerm->pExpr, EP_InnerON) + && NEVER(ExprHasProperty(pTerm->pExpr, EP_InnerON)) ){ return 0; } return 1; } +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX +/* +** Return true if column iCol of table pTab seem like it might be a +** good column to use as part of a query-time index. +** +** Current algorithm (subject to improvement!): +** +** 1. If iCol is already the left-most column of some other index, +** then return false. +** +** 2. If iCol is part of an existing index that has an aiRowLogEst of +** more than 20, then return false. +** +** 3. If no disqualifying conditions above are found, return true. +** +** 2025-01-03: I experimented with a new rule that returns false if the +** the datatype of the column is "BOOLEAN". This did not improve +** performance on any queries at hand, but it did burn CPU cycles, so the +** idea was not committed. 
+*/
+static SQLITE_NOINLINE int columnIsGoodIndexCandidate(
+  const Table *pTab,
+  int iCol
+){
+  const Index *pIdx;
+  for(pIdx = pTab->pIndex; pIdx!=0; pIdx=pIdx->pNext){
+    int j;
+    for(j=0; j<pIdx->nKeyCol; j++){
+      if( pIdx->aiColumn[j]==iCol ){
+        if( j==0 ) return 0;
+        if( pIdx->hasStat1 && pIdx->aiRowLogEst[j+1]>20 ) return 0;
+        break;
+      }
+    }
+  }
+  return 1;
+}
+#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */
+

#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
@@ -162972,6 +164312,8 @@ static int termCanDriveIndex(
  const Bitmask notReady        /* Tables in outer loops of the join */
){
  char aff;
+  int leftCol;
+
  if( pTerm->leftCursor!=pSrc->iCursor ) return 0;
  if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0;
  assert( (pSrc->fg.jointype & JT_RIGHT)==0 );
@@ -162982,11 +164324,12 @@ static int termCanDriveIndex(
  }
  if( (pTerm->prereqRight & notReady)!=0 ) return 0;
  assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 );
-  if( pTerm->u.x.leftColumn<0 ) return 0;
-  aff = pSrc->pTab->aCol[pTerm->u.x.leftColumn].affinity;
+  leftCol = pTerm->u.x.leftColumn;
+  if( leftCol<0 ) return 0;
+  aff = pSrc->pSTab->aCol[leftCol].affinity;
  if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0;
  testcase( pTerm->pExpr->op==TK_IS );
-  return 1;
+  return columnIsGoodIndexCandidate(pSrc->pSTab, leftCol);
}
#endif
@@ -163019,7 +164362,7 @@ static void explainAutomaticIndex(
    sqlite3_str *pStr = sqlite3_str_new(pParse->db);
    sqlite3_str_appendf(pStr,"CREATE AUTOMATIC INDEX ON %s(", pTab->zName);
    assert( pIdx->nColumn>1 );
-    assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID );
+    assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID || !HasRowid(pTab) );
    for(ii=0; ii<(pIdx->nColumn-1); ii++){
      const char *zName = 0;
      int iCol = pIdx->aiColumn[ii];
@@ -163094,7 +164437,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
  nKeyCol = 0;
  pTabList = pWC->pWInfo->pTabList;
  pSrc = &pTabList->a[pLevel->iFrom];
-  pTable = pSrc->pTab;
+  pTable = pSrc->pSTab;
  pWCEnd = &pWC->a[pWC->nTerm];
  pLoop = pLevel->pWLoop;
  idxCols = 0;
@@ -163150,6 +164493,19 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
  }else{
    extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1));
  }
+  if( !HasRowid(pTable) ){
+    /* For WITHOUT ROWID tables, ensure that all PRIMARY KEY columns are
+    ** either in the idxCols mask or in the extraCols mask */
+    for(i=0; i<pTable->nCol; i++){
+      if( (pTable->aCol[i].colFlags & COLFLAG_PRIMKEY)==0 ) continue;
+      if( i>=BMS-1 ){
+        extraCols |= MASKBIT(BMS-1);
+        break;
+      }
+      if( idxCols & MASKBIT(i) ) continue;
+      extraCols |= MASKBIT(i);
+    }
+  }
  mxBitCol = MIN(BMS-1,pTable->nCol);
  testcase( pTable->nCol==BMS-1 );
  testcase( pTable->nCol==BMS-2 );
@@ -163161,7 +164517,8 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
  }

  /* Construct the Index object to describe this index */
-  pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed);
+  pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+HasRowid(pTable),
+                                    0, &zNotUsed);
  if( pIdx==0 ) goto end_auto_index_create;
  pLoop->u.btree.pIndex = pIdx;
  pIdx->zName = "auto-index";
@@ -163217,8 +164574,10 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
    }
  }
  assert( n==nKeyCol );
-  pIdx->aiColumn[n] = XN_ROWID;
-  pIdx->azColl[n] = sqlite3StrBINARY;
+  if( HasRowid(pTable) ){
+    pIdx->aiColumn[n] = XN_ROWID;
+    pIdx->azColl[n] = sqlite3StrBINARY;
+  }

  /* Create the automatic index */
  explainAutomaticIndex(pParse, pIdx, pPartial!=0, &addrExp);
@@ -163236,12 +164595,17 @@ static SQLITE_NOINLINE void constructAutomaticIndex(

  /* Fill the automatic index with content */
  assert( pSrc ==
&pWC->pWInfo->pTabList->a[pLevel->iFrom] );
  if( pSrc->fg.viaCoroutine ){
-    int regYield = pSrc->regReturn;
+    int regYield;
+    Subquery *pSubq;
+    assert( pSrc->fg.isSubquery );
+    pSubq = pSrc->u4.pSubq;
+    assert( pSubq!=0 );
+    regYield = pSubq->regReturn;
    addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0);
-    sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSrc->addrFillSub);
+    sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSubq->addrFillSub);
    addrTop =  sqlite3VdbeAddOp1(v, OP_Yield, regYield);
    VdbeCoverage(v);
-    VdbeComment((v, "next row of %s", pSrc->pTab->zName));
+    VdbeComment((v, "next row of %s", pSrc->pSTab->zName));
  }else{
    addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v);
  }
@@ -163263,11 +164627,12 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
  sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
  if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue);
  if( pSrc->fg.viaCoroutine ){
+    assert( pSrc->fg.isSubquery && pSrc->u4.pSubq!=0 );
    sqlite3VdbeChangeP2(v, addrCounter, regBase+n);
    testcase( pParse->db->mallocFailed );
    assert( pLevel->iIdxCur>0 );
    translateColumnToCopy(pParse, addrTop, pLevel->iTabCur,
-                          pSrc->regResult, pLevel->iIdxCur);
+                          pSrc->u4.pSubq->regResult, pLevel->iIdxCur);
    sqlite3VdbeGoto(v, addrTop);
    pSrc->fg.viaCoroutine = 0;
  }else{
@@ -163358,7 +164723,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
  iSrc = pLevel->iFrom;
  pItem = &pTabList->a[iSrc];
  assert( pItem!=0 );
-  pTab = pItem->pTab;
+  pTab = pItem->pSTab;
  assert( pTab!=0 );
  sz = sqlite3LogEstToInt(pTab->nRowLogEst);
  if( sz<10000 ){
@@ -163389,7 +164754,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
        int r1 = sqlite3GetTempRange(pParse, n);
        int jj;
        for(jj=0; jj<n; jj++){
-          assert( pIdx->pTable==pItem->pTab );
+          assert( pIdx->pTable==pItem->pSTab );
          sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iCur, jj, r1+jj);
        }
        sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, r1, n);
@@ -163427,6 +164792,20 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(

#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Return term iTerm of the WhereClause passed as the first argument.  Terms
+** are numbered from 0 upwards, starting with the terms in pWC->a[], then
+** those in pWC->pOuter->a[] (if any), and so on.
+*/
+static WhereTerm *termFromWhereClause(WhereClause *pWC, int iTerm){
+  WhereClause *p;
+  for(p=pWC; p; p=p->pOuter){
+    if( iTerm<p->nTerm ) return &p->a[iTerm];
+    iTerm -= p->nTerm;
+  }
+  return 0;
+}
+
/*
** Allocate and populate an sqlite3_index_info structure. It is the
** responsibility of the caller to eventually release the structure
@@ -163453,9 +164832,10 @@ static sqlite3_index_info *allocateIndexInfo(
  const Table *pTab;
  int eDistinct = 0;
  ExprList *pOrderBy = pWInfo->pOrderBy;
+  WhereClause *p;

  assert( pSrc!=0 );
-  pTab = pSrc->pTab;
+  pTab = pSrc->pSTab;
  assert( pTab!=0 );
  assert( IsVirtual(pTab) );

  /* Find all WHERE clause constraints referring to this virtual table.
  ** Mark each term with the TERM_OK flag.  Set nTerm to the number of
  ** terms found.
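  ** (Terms are counted across the whole pWC->pOuter chain, using the same
  ** flat numbering that termFromWhereClause() above decodes.  For instance,
  ** with hypothetical counts pWC->nTerm==3 and pWC->pOuter->nTerm==2, the
  ** combined term number 4 resolves to pWC->pOuter->a[1].)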
*/ - for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - pTerm->wtFlags &= ~TERM_OK; - if( pTerm->leftCursor != pSrc->iCursor ) continue; - if( pTerm->prereqRight & mUnusable ) continue; - assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); - testcase( pTerm->eOperator & WO_IN ); - testcase( pTerm->eOperator & WO_ISNULL ); - testcase( pTerm->eOperator & WO_IS ); - testcase( pTerm->eOperator & WO_ALL ); - if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; - if( pTerm->wtFlags & TERM_VNULL ) continue; + for(p=pWC, nTerm=0; p; p=p->pOuter){ + for(i=0, pTerm=p->a; inTerm; i++, pTerm++){ + pTerm->wtFlags &= ~TERM_OK; + if( pTerm->leftCursor != pSrc->iCursor ) continue; + if( pTerm->prereqRight & mUnusable ) continue; + assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); + testcase( pTerm->eOperator & WO_IN ); + testcase( pTerm->eOperator & WO_ISNULL ); + testcase( pTerm->eOperator & WO_IS ); + testcase( pTerm->eOperator & WO_ALL ); + if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; + if( pTerm->wtFlags & TERM_VNULL ) continue; - assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); - assert( pTerm->u.x.leftColumn>=XN_ROWID ); - assert( pTerm->u.x.leftColumnnCol ); - if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 - && !constraintCompatibleWithOuterJoin(pTerm,pSrc) - ){ - continue; + assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); + assert( pTerm->u.x.leftColumn>=XN_ROWID ); + assert( pTerm->u.x.leftColumnnCol ); + if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 + && !constraintCompatibleWithOuterJoin(pTerm,pSrc) + ){ + continue; + } + nTerm++; + pTerm->wtFlags |= TERM_OK; } - nTerm++; - pTerm->wtFlags |= TERM_OK; } /* If the ORDER BY clause contains only columns in the current @@ -163559,53 +164941,69 @@ static sqlite3_index_info *allocateIndexInfo( pIdxInfo->aConstraint = pIdxCons; pIdxInfo->aOrderBy = pIdxOrderBy; pIdxInfo->aConstraintUsage = pUsage; + pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; + if( HasRowid(pTab)==0 ){ + /* Ensure that all bits associated with PK columns are set. This is to + ** ensure they are available for cases like RIGHT joins or OR loops. */ + Index *pPk = sqlite3PrimaryKeyIndex((Table*)pTab); + assert( pPk!=0 ); + for(i=0; inKeyCol; i++){ + int iCol = pPk->aiColumn[i]; + assert( iCol>=0 ); + if( iCol>=BMS-1 ) iCol = BMS-1; + pIdxInfo->colUsed |= MASKBIT(iCol); + } + } pHidden->pWC = pWC; pHidden->pParse = pParse; pHidden->eDistinct = eDistinct; pHidden->mIn = 0; - for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - u16 op; - if( (pTerm->wtFlags & TERM_OK)==0 ) continue; - pIdxCons[j].iColumn = pTerm->u.x.leftColumn; - pIdxCons[j].iTermOffset = i; - op = pTerm->eOperator & WO_ALL; - if( op==WO_IN ){ - if( (pTerm->wtFlags & TERM_SLICE)==0 ){ - pHidden->mIn |= SMASKBIT32(j); - } - op = WO_EQ; - } - if( op==WO_AUX ){ - pIdxCons[j].op = pTerm->eMatchOp; - }else if( op & (WO_ISNULL|WO_IS) ){ - if( op==WO_ISNULL ){ - pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL; - }else{ - pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS; - } - }else{ - pIdxCons[j].op = (u8)op; - /* The direct assignment in the previous line is possible only because - ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The - ** following asserts verify this fact. 
*/ - assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); - assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); - assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); - assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); - assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); - assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) ); - - if( op & (WO_LT|WO_LE|WO_GT|WO_GE) - && sqlite3ExprIsVector(pTerm->pExpr->pRight) - ){ - testcase( j!=i ); - if( j<16 ) mNoOmit |= (1 << j); - if( op==WO_LT ) pIdxCons[j].op = WO_LE; - if( op==WO_GT ) pIdxCons[j].op = WO_GE; + for(p=pWC, i=j=0; p; p=p->pOuter){ + int nLast = i+p->nTerm;; + for(pTerm=p->a; iwtFlags & TERM_OK)==0 ) continue; + pIdxCons[j].iColumn = pTerm->u.x.leftColumn; + pIdxCons[j].iTermOffset = i; + op = pTerm->eOperator & WO_ALL; + if( op==WO_IN ){ + if( (pTerm->wtFlags & TERM_SLICE)==0 ){ + pHidden->mIn |= SMASKBIT32(j); + } + op = WO_EQ; + } + if( op==WO_AUX ){ + pIdxCons[j].op = pTerm->eMatchOp; + }else if( op & (WO_ISNULL|WO_IS) ){ + if( op==WO_ISNULL ){ + pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_ISNULL; + }else{ + pIdxCons[j].op = SQLITE_INDEX_CONSTRAINT_IS; + } + }else{ + pIdxCons[j].op = (u8)op; + /* The direct assignment in the previous line is possible only because + ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The + ** following asserts verify this fact. */ + assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); + assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); + assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); + assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); + assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); + assert( pTerm->eOperator&(WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_AUX) ); + + if( op & (WO_LT|WO_LE|WO_GT|WO_GE) + && sqlite3ExprIsVector(pTerm->pExpr->pRight) + ){ + testcase( j!=i ); + if( j<16 ) mNoOmit |= (1 << j); + if( op==WO_LT ) pIdxCons[j].op = WO_LE; + if( op==WO_GT ) pIdxCons[j].op = WO_GE; + } } - } - j++; + j++; + } } assert( j==nTerm ); pIdxInfo->nConstraint = j; @@ -163625,6 +165023,17 @@ static sqlite3_index_info *allocateIndexInfo( return pIdxInfo; } +/* +** Free and zero the sqlite3_index_info.idxStr value if needed. +*/ +static void freeIdxStr(sqlite3_index_info *pIdxInfo){ + if( pIdxInfo->needToFreeIdxStr ){ + sqlite3_free(pIdxInfo->idxStr); + pIdxInfo->idxStr = 0; + pIdxInfo->needToFreeIdxStr = 0; + } +} + /* ** Free an sqlite3_index_info structure allocated by allocateIndexInfo() ** and possibly modified by xBestIndex methods. @@ -163640,6 +165049,7 @@ static void freeIndexInfo(sqlite3 *db, sqlite3_index_info *pIdxInfo){ sqlite3ValueFree(pHidden->aRhs[i]); /* IMP: R-14553-25174 */ pHidden->aRhs[i] = 0; } + freeIdxStr(pIdxInfo); sqlite3DbFree(db, pIdxInfo); } @@ -163660,9 +165070,11 @@ static void freeIndexInfo(sqlite3 *db, sqlite3_index_info *pIdxInfo){ ** that this is required. 
*/ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ - sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; int rc; + sqlite3_vtab *pVtab; + assert( IsVirtual(pTab) ); + pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; whereTraceIndexInfoInputs(p, pTab); pParse->db->nSchemaLock++; rc = pVtab->pModule->xBestIndex(pVtab, p); @@ -164354,7 +165766,7 @@ static int whereInScanEst( #endif /* SQLITE_ENABLE_STAT4 */ -#ifdef WHERETRACE_ENABLED +#if defined(WHERETRACE_ENABLED) || defined(SQLITE_DEBUG) /* ** Print the content of a WhereTerm object */ @@ -164398,6 +165810,9 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ sqlite3TreeViewExpr(0, pTerm->pExpr, 0); } } +SQLITE_PRIVATE void sqlite3ShowWhereTerm(WhereTerm *pTerm){ + sqlite3WhereTermPrint(pTerm, 0); +} #endif #ifdef WHERETRACE_ENABLED @@ -164429,17 +165844,19 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ ** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31 */ SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){ + WhereInfo *pWInfo; if( pWC ){ - WhereInfo *pWInfo = pWC->pWInfo; + pWInfo = pWC->pWInfo; int nb = 1+(pWInfo->pTabList->nSrc+3)/4; SrcItem *pItem = pWInfo->pTabList->a + p->iTab; - Table *pTab = pItem->pTab; + Table *pTab = pItem->pSTab; Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); sqlite3DebugPrintf(" %12s", pItem->zAlias ? pItem->zAlias : pTab->zName); }else{ + pWInfo = 0; sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d", p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab); } @@ -164471,7 +165888,12 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause }else{ sqlite3DebugPrintf(" f %06x N %d", p->wsFlags, p->nLTerm); } - sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); + if( pWInfo && pWInfo->bStarUsed && p->rStarDelta!=0 ){ + sqlite3DebugPrintf(" cost %d,%d,%d delta=%d\n", + p->rSetup, p->rRun, p->nOut, p->rStarDelta); + }else{ + sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); + } if( p->nLTerm && (sqlite3WhereTrace & 0x4000)!=0 ){ int i; for(i=0; inLTerm; i++){ @@ -164605,7 +166027,7 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ ** and Y has additional constraints that might speed the search that X lacks ** but the cost of running X is not more than the cost of running Y. ** -** In other words, return true if the cost relationwship between X and Y +** In other words, return true if the cost relationship between X and Y ** is inverted and needs to be adjusted. ** ** Case 1: @@ -164991,7 +166413,7 @@ static void whereLoopOutputAdjust( Expr *pRight = pTerm->pExpr->pRight; int k = 0; testcase( pTerm->pExpr->op==TK_IS ); - if( sqlite3ExprIsInteger(pRight, &k) && k>=(-1) && k<=1 ){ + if( sqlite3ExprIsInteger(pRight, &k, 0) && k>=(-1) && k<=1 ){ k = 10; }else{ k = 20; @@ -165288,7 +166710,7 @@ static int whereLoopAddBtreeIndex( || (iCol>=0 && nInMul==0 && saved_nEq==pProbe->nKeyCol-1) ){ if( iCol==XN_ROWID || pProbe->uniqNotNull - || (pProbe->nKeyCol==1 && pProbe->onError && eOp==WO_EQ) + || (pProbe->nKeyCol==1 && pProbe->onError && (eOp & WO_EQ)) ){ pNew->wsFlags |= WHERE_ONEROW; }else{ @@ -165421,7 +166843,7 @@ static int whereLoopAddBtreeIndex( ** 2. Stepping forward in the index pNew->nOut times to find all ** additional matching entries. 
*/ - assert( pSrc->pTab->szTabRow>0 ); + assert( pSrc->pSTab->szTabRow>0 ); if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ /* The pProbe->szIdxRow is low for an IPK table since the interior ** pages are small. Thus szIdxRow gives a good estimate of seek cost. @@ -165429,7 +166851,7 @@ static int whereLoopAddBtreeIndex( ** under-estimate the scanning cost. */ rCostIdx = pNew->nOut + 16; }else{ - rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pSTab->szTabRow; } rCostIdx = sqlite3LogEstAdd(rLogSize, rCostIdx); @@ -165583,7 +167005,6 @@ static int whereUsablePartialIndex( if( !whereUsablePartialIndex(iTab,jointype,pWC,pWhere->pLeft) ) return 0; pWhere = pWhere->pRight; } - if( pParse->db->flags & SQLITE_EnableQPSG ) pParse = 0; for(i=0, pTerm=pWC->a; inTerm; i++, pTerm++){ Expr *pExpr; pExpr = pTerm->pExpr; @@ -165894,9 +167315,9 @@ static int whereLoopAddBtree( pWInfo = pBuilder->pWInfo; pTabList = pWInfo->pTabList; pSrc = pTabList->a + pNew->iTab; - pTab = pSrc->pTab; + pTab = pSrc->pSTab; pWC = pBuilder->pWC; - assert( !IsVirtual(pSrc->pTab) ); + assert( !IsVirtual(pSrc->pSTab) ); if( pSrc->fg.isIndexedBy ){ assert( pSrc->fg.isCte==0 ); @@ -165921,7 +167342,7 @@ static int whereLoopAddBtree( sPk.idxType = SQLITE_IDXTYPE_IPK; aiRowEstPk[0] = pTab->nRowLogEst; aiRowEstPk[1] = 0; - pFirst = pSrc->pTab->pIndex; + pFirst = pSrc->pSTab->pIndex; if( pSrc->fg.notIndexed==0 ){ /* The real indices of the table are only considered if the ** NOT INDEXED qualifier is omitted from the FROM clause */ @@ -165938,7 +167359,6 @@ static int whereLoopAddBtree( && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 && !pSrc->fg.isIndexedBy /* Has no INDEXED BY clause */ && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */ - && HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */ && !pSrc->fg.isCorrelated /* Not a correlated subquery */ && !pSrc->fg.isRecursive /* Not a recursive common table expression. */ && (pSrc->fg.jointype & JT_RIGHT)==0 /* Not the right tab of a RIGHT JOIN */ @@ -166011,6 +167431,7 @@ static int whereLoopAddBtree( pNew->prereq = mPrereq; pNew->nOut = rSize; pNew->u.btree.pIndex = pProbe; + pNew->u.btree.pOrderBy = 0; b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor); /* The ONEPASS_DESIRED flags never occurs together with ORDER BY */ @@ -166040,6 +167461,10 @@ static int whereLoopAddBtree( #endif ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); + if( pSrc->fg.isSubquery ){ + if( pSrc->fg.viaCoroutine ) pNew->wsFlags |= WHERE_COROUTINE; + pNew->u.btree.pOrderBy = pSrc->u4.pSubq->pSelect->pOrderBy; + } rc = whereLoopInsert(pBuilder, pNew); pNew->nOut = rSize; if( rc ) break; @@ -166242,7 +167667,7 @@ static int whereLoopAddVirtualOne( ** arguments mUsable and mExclude. 
*/ pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; for(i=0; ia[pIdxCons->iTermOffset]; + WhereTerm *pTerm = termFromWhereClause(pWC, pIdxCons->iTermOffset); pIdxCons->usable = 0; if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight && (pTerm->eOperator & mExclude)==0 @@ -166261,11 +167686,10 @@ static int whereLoopAddVirtualOne( pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2; pIdxInfo->estimatedRows = 25; pIdxInfo->idxFlags = 0; - pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; pHidden->mHandleIn = 0; /* Invoke the virtual table xBestIndex() method */ - rc = vtabBestIndex(pParse, pSrc->pTab, pIdxInfo); + rc = vtabBestIndex(pParse, pSrc->pSTab, pIdxInfo); if( rc ){ if( rc==SQLITE_CONSTRAINT ){ /* If the xBestIndex method returns SQLITE_CONSTRAINT, that means @@ -166273,6 +167697,7 @@ static int whereLoopAddVirtualOne( ** Make no entries in the loop table. */ WHERETRACE(0xffffffff, (" ^^^^--- non-viable plan rejected!\n")); + freeIdxStr(pIdxInfo); return SQLITE_OK; } return rc; @@ -166290,18 +167715,17 @@ static int whereLoopAddVirtualOne( int j = pIdxCons->iTermOffset; if( iTerm>=nConstraint || j<0 - || j>=pWC->nTerm + || (pTerm = termFromWhereClause(pWC, j))==0 || pNew->aLTerm[iTerm]!=0 || pIdxCons->usable==0 ){ - sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName); - testcase( pIdxInfo->needToFreeIdxStr ); + sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pSTab->zName); + freeIdxStr(pIdxInfo); return SQLITE_ERROR; } testcase( iTerm==nConstraint-1 ); testcase( j==0 ); testcase( j==pWC->nTerm-1 ); - pTerm = &pWC->a[j]; pNew->prereq |= pTerm->prereqRight; assert( iTermnLSlot ); pNew->aLTerm[iTerm] = pTerm; @@ -166346,11 +167770,7 @@ static int whereLoopAddVirtualOne( ** the plan cannot be used. In these cases set variable *pbRetryLimit ** to true to tell the caller to retry with LIMIT and OFFSET ** disabled. */ - if( pIdxInfo->needToFreeIdxStr ){ - sqlite3_free(pIdxInfo->idxStr); - pIdxInfo->idxStr = 0; - pIdxInfo->needToFreeIdxStr = 0; - } + freeIdxStr(pIdxInfo); *pbRetryLimit = 1; return SQLITE_OK; } @@ -166362,8 +167782,8 @@ static int whereLoopAddVirtualOne( if( pNew->aLTerm[i]==0 ){ /* The non-zero argvIdx values must be contiguous. Raise an ** error if they are not */ - sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName); - testcase( pIdxInfo->needToFreeIdxStr ); + sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pSTab->zName); + freeIdxStr(pIdxInfo); return SQLITE_ERROR; } } @@ -166374,6 +167794,7 @@ static int whereLoopAddVirtualOne( pNew->u.vtab.idxStr = pIdxInfo->idxStr; pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ? 
pIdxInfo->nOrderBy : 0); + pNew->u.vtab.bIdxNumHex = (pIdxInfo->idxFlags&SQLITE_INDEX_SCAN_HEX)!=0; pNew->rSetup = 0; pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost); pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows); @@ -166418,7 +167839,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info *pIdxInfo, int if( iCons>=0 && iConsnConstraint ){ CollSeq *pC = 0; int iTerm = pIdxInfo->aConstraint[iCons].iTermOffset; - Expr *pX = pHidden->pWC->a[iTerm].pExpr; + Expr *pX = termFromWhereClause(pHidden->pWC, iTerm)->pExpr; if( pX->pLeft ){ pC = sqlite3ExprCompareCollSeq(pHidden->pParse, pX); } @@ -166464,7 +167885,9 @@ SQLITE_API int sqlite3_vtab_rhs_value( rc = SQLITE_MISUSE_BKPT; /* EV: R-30545-25046 */ }else{ if( pH->aRhs[iCons]==0 ){ - WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset]; + WhereTerm *pTerm = termFromWhereClause( + pH->pWC, pIdxInfo->aConstraint[iCons].iTermOffset + ); rc = sqlite3ValueFromExpr( pH->pParse->db, pTerm->pExpr->pRight, ENC(pH->pParse->db), SQLITE_AFF_BLOB, &pH->aRhs[iCons] @@ -166562,7 +167985,7 @@ static int whereLoopAddVirtual( pWC = pBuilder->pWC; pNew = pBuilder->pNew; pSrc = &pWInfo->pTabList->a[pNew->iTab]; - assert( IsVirtual(pSrc->pTab) ); + assert( IsVirtual(pSrc->pSTab) ); p = allocateIndexInfo(pWInfo, pWC, mUnusable, pSrc, &mNoOmit); if( p==0 ) return SQLITE_NOMEM_BKPT; pNew->rSetup = 0; @@ -166576,7 +167999,7 @@ static int whereLoopAddVirtual( } /* First call xBestIndex() with all constraints usable. */ - WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pTab->zName)); + WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pSTab->zName)); WHERETRACE(0x800, (" VirtualOne: all usable\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, &bRetry @@ -166620,9 +168043,8 @@ static int whereLoopAddVirtual( Bitmask mNext = ALLBITS; assert( mNext>0 ); for(i=0; ia[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq - ); + int iTerm = p->aConstraint[i].iTermOffset; + Bitmask mThis = termFromWhereClause(pWC, iTerm)->prereqRight & ~mPrereq; if( mThis>mPrev && mThisneedToFreeIdxStr ) sqlite3_free(p->idxStr); freeIndexInfo(pParse->db, p); - WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pTab->zName, rc)); + WHERETRACE(0x800, ("END %s.addVirtual(), rc=%d\n", pSrc->pSTab->zName, rc)); return rc; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ @@ -166732,7 +168153,7 @@ static int whereLoopAddOr( } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pItem->pTab) ){ + if( IsVirtual(pItem->pSTab) ){ rc = whereLoopAddVirtual(&sSubBuild, mPrereq, mUnusable); }else #endif @@ -166846,7 +168267,7 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ mPrereq = 0; } #ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pItem->pTab) ){ + if( IsVirtual(pItem->pSTab) ){ SrcItem *p; for(p=&pItem[1]; pfg.jointype & (JT_OUTER|JT_CROSS)) ){ @@ -166878,6 +168299,97 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ return rc; } +/* Implementation of the order-by-subquery optimization: +** +** WhereLoop pLoop, which the iLoop-th term of the nested loop, is really +** a subquery or CTE that has an ORDER BY clause. See if any of the terms +** in the subquery ORDER BY clause will satisfy pOrderBy from the outer +** query. Mark off all satisfied terms (by setting bits in *pOBSat) and +** return TRUE if they do. If not, return false. 
+**
+** Example:
+**
+**    CREATE TABLE t1(a,b,c, PRIMARY KEY(a,b));
+**    CREATE TABLE t2(x,y);
+**    WITH t3(p,q) AS MATERIALIZED (SELECT x+y, x-y FROM t2 ORDER BY x+y)
+**    SELECT * FROM t3 JOIN t1 ON a=q ORDER BY p, b;
+**
+** The CTE named "t3" comes out in the natural order of "p", so the first
+** term of "ORDER BY p,b" is satisfied by a sequential scan of "t3"
+** and sorting only needs to occur on the second term "b".
+**
+** Limitations:
+**
+** (1) The optimization is not applied if the outer ORDER BY contains
+**     a COLLATE clause.  The optimization might be applied if the
+**     outer ORDER BY uses NULLS FIRST, NULLS LAST, ASC, and/or DESC as
+**     long as the subquery ORDER BY does the same.  But if the
+**     outer ORDER BY uses COLLATE, even a redundant COLLATE, the
+**     optimization is bypassed.
+**
+** (2) The subquery ORDER BY terms must exactly match subquery result
+**     columns, including any COLLATE annotations.  This routine relies
+**     on iOrderByCol to do matching between order by terms and result
+**     columns, and iOrderByCol will not be set if the result column
+**     and ORDER BY collations differ.
+**
+** (3) The subquery and outer ORDER BY can be in opposite directions as
+**     long as the subquery is materialized.  If the subquery is
+**     implemented as a co-routine, the sort orders must be in the same
+**     direction because there is no way to run a co-routine backwards.
+*/
+static SQLITE_NOINLINE int wherePathMatchSubqueryOB(
+  WhereInfo *pWInfo,      /* The WHERE clause */
+  WhereLoop *pLoop,       /* The nested loop term that is a subquery */
+  int iLoop,              /* Which level of the nested loop.  0==outermost */
+  int iCur,               /* Cursor used by this loop */
+  ExprList *pOrderBy,     /* The ORDER BY clause on the whole query */
+  Bitmask *pRevMask,      /* When loops need to go in reverse order */
+  Bitmask *pOBSat         /* Which terms of pOrderBy are satisfied so far */
+){
+  int iOB;                /* Index into pOrderBy->a[] */
+  int jSub;               /* Index into pSubOB->a[] */
+  u8 rev = 0;             /* True if iOB and jSub sort in opposite directions */
+  u8 revIdx = 0;          /* Sort direction for jSub */
+  Expr *pOBExpr;          /* Current term of outer ORDER BY */
+  ExprList *pSubOB;       /* Complete ORDER BY on the subquery */
+
+  pSubOB = pLoop->u.btree.pOrderBy;
+  assert( pSubOB!=0 );
+  for(iOB=0; (MASKBIT(iOB) & *pOBSat)!=0; iOB++){}
+  for(jSub=0; jSub<pSubOB->nExpr && iOB<pOrderBy->nExpr; jSub++, iOB++){
+    if( pSubOB->a[jSub].u.x.iOrderByCol==0 ) break;
+    pOBExpr = pOrderBy->a[iOB].pExpr;
+    if( pOBExpr->op!=TK_COLUMN && pOBExpr->op!=TK_AGG_COLUMN ) break;
+    if( pOBExpr->iTable!=iCur ) break;
+    if( pOBExpr->iColumn!=pSubOB->a[jSub].u.x.iOrderByCol-1 ) break;
+    if( (pWInfo->wctrlFlags & WHERE_GROUPBY)==0 ){
+      u8 sfOB = pOrderBy->a[iOB].fg.sortFlags;   /* sortFlags for iOB */
+      u8 sfSub = pSubOB->a[jSub].fg.sortFlags;   /* sortFlags for jSub */
+      if( (sfSub & KEYINFO_ORDER_BIGNULL) != (sfOB & KEYINFO_ORDER_BIGNULL) ){
+        break;
+      }
+      revIdx = sfSub & KEYINFO_ORDER_DESC;
+      if( jSub>0 ){
+        if( (rev^revIdx)!=(sfOB & KEYINFO_ORDER_DESC) ){
+          break;
+        }
+      }else{
+        rev = revIdx ^ (sfOB & KEYINFO_ORDER_DESC);
+        if( rev ){
+          if( (pLoop->wsFlags & WHERE_COROUTINE)!=0 ){
+            /* Cannot run a co-routine in reverse order */
+            break;
+          }
+          *pRevMask |= MASKBIT(iLoop);
+        }
+      }
+    }
+    *pOBSat |= MASKBIT(iOB);
+  }
+  return jSub>0;
+}
+
/*
** Examine a WherePath (with the addition of the extra WhereLoop of the 6th
** parameters) to see if it outputs rows in the requested ORDER BY
@@ -167023,9 +168535,18 @@ static i8 wherePathSatisfiesOrderBy(

      if( (pLoop->wsFlags & WHERE_ONEROW)==0 ){
        if(
pLoop->wsFlags & WHERE_IPK ){ + if( pLoop->u.btree.pOrderBy + && OptimizationEnabled(db, SQLITE_OrderBySubq) + && wherePathMatchSubqueryOB(pWInfo,pLoop,iLoop,iCur, + pOrderBy,pRevMask, &obSat) + ){ + nColumn = 0; + isOrderDistinct = 0; + }else{ + nColumn = 1; + } pIndex = 0; nKeyCol = 0; - nColumn = 1; }else if( (pIndex = pLoop->u.btree.pIndex)==0 || pIndex->bUnordered ){ return 0; }else{ @@ -167035,7 +168556,7 @@ static i8 wherePathSatisfiesOrderBy( assert( pIndex->aiColumn[nColumn-1]==XN_ROWID || !HasRowid(pIndex->pTable)); /* All relevant terms of the index must also be non-NULL in order - ** for isOrderDistinct to be true. So the isOrderDistint value + ** for isOrderDistinct to be true. So the isOrderDistinct value ** computed here might be a false positive. Corrections will be ** made at tag-20210426-1 below */ isOrderDistinct = IsUniqueIndex(pIndex) @@ -167120,7 +168641,7 @@ static i8 wherePathSatisfiesOrderBy( } /* Find the ORDER BY term that corresponds to the j-th column - ** of the index and mark that ORDER BY term off + ** of the index and mark that ORDER BY term having been satisfied. */ isMatch = 0; for(i=0; bOnce && inLevel; /* Number of terms in the join */ + WhereLoop *pWLoop; /* For looping over WhereLoops */ + +#ifdef SQLITE_DEBUG + /* The star-query detection code below makes use of the following + ** properties of the WhereLoop list, so verify them before + ** continuing: + ** (1) .maskSelf is the bitmask corresponding to .iTab + ** (2) The WhereLoop list is in ascending .iTab order + */ + for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ + assert( pWLoop->maskSelf==MASKBIT(pWLoop->iTab) ); + assert( pWLoop->pNextLoop==0 || pWLoop->iTab<=pWLoop->pNextLoop->iTab ); + } +#endif /* SQLITE_DEBUG */ + + if( nLoop>=5 + && !pWInfo->bStarDone + && OptimizationEnabled(pWInfo->pParse->db, SQLITE_StarQuery) + ){ + SrcItem *aFromTabs; /* All terms of the FROM clause */ + int iFromIdx; /* Term of FROM clause is the candidate fact-table */ + Bitmask m; /* Bitmask for candidate fact-table */ + Bitmask mSelfJoin = 0; /* Tables that cannot be dimension tables */ + WhereLoop *pStart; /* Where to start searching for dimension-tables */ + + pWInfo->bStarDone = 1; /* Only do this computation once */ + + /* Look for fact tables with four or more dimensions where the + ** dimension tables are not separately from the fact tables by an outer + ** or cross join. Adjust cost weights if found. + */ + assert( !pWInfo->bStarUsed ); + aFromTabs = pWInfo->pTabList->a; + pStart = pWInfo->pLoops; + for(iFromIdx=0, m=1; iFromIdxfg.jointype & (JT_OUTER|JT_CROSS))!=0 ){ + /* If the candidate fact-table is the right table of an outer join + ** restrict the search for dimension-tables to be tables to the right + ** of the fact-table. */ + if( iFromIdx+4 > nLoop ) break; /* Impossible to reach nDep>=4 */ + while( pStart && pStart->iTab<=iFromIdx ){ + pStart = pStart->pNextLoop; + } + } + for(pWLoop=pStart; pWLoop; pWLoop=pWLoop->pNextLoop){ + if( (aFromTabs[pWLoop->iTab].fg.jointype & (JT_OUTER|JT_CROSS))!=0 ){ + /* Fact-tables and dimension-tables cannot be separated by an + ** outer join (at least for the definition of fact- and dimension- + ** used by this heuristic). 
*/
+          break;
+        }
+        if( (pWLoop->prereq & m)!=0           /* pWInfo depends on iFromIdx */
+         && (pWLoop->maskSelf & mSeen)==0 /* pWInfo not already a dependency */
+         && (pWLoop->maskSelf & mSelfJoin)==0 /* Not a self-join */
+        ){
+          if( aFromTabs[pWLoop->iTab].pSTab==pFactTab->pSTab ){
+            mSelfJoin |= m;
+          }else{
+            nDep++;
+            mSeen |= pWLoop->maskSelf;
+          }
+        }
+      }
+      if( nDep<=3 ) continue;
+
+      /* If we reach this point, it means that pFactTab is a fact table
+      ** with four or more dimensions connected by inner joins.  Proceed
+      ** to make cost adjustments. */
+
+#ifdef WHERETRACE_ENABLED
+      /* Make sure rStarDelta values are initialized */
+      if( !pWInfo->bStarUsed ){
+        for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){
+          pWLoop->rStarDelta = 0;
+        }
+      }
+#endif
+      pWInfo->bStarUsed = 1;
+
+      /* Compute the maximum cost of any WhereLoop for the
+      ** fact table plus one epsilon */
+      mxRun = LOGEST_MIN;
+      for(pWLoop=pStart; pWLoop; pWLoop=pWLoop->pNextLoop){
+        if( pWLoop->iTab<iFromIdx ) continue;
+        if( pWLoop->iTab>iFromIdx ) break;
+        if( pWLoop->rRun>mxRun ) mxRun = pWLoop->rRun;
+      }
+      if( ALWAYS(mxRun<LOGEST_MAX) ) mxRun++;
+      for(pWLoop=pStart; pWLoop; pWLoop=pWLoop->pNextLoop){
+        if( (pWLoop->maskSelf & mSeen)==0 ) continue;
+        if( pWLoop->nLTerm ) continue;
+        if( pWLoop->rRun<mxRun ){
+#ifdef WHERETRACE_ENABLED /* 0x80000 */
+          if( sqlite3WhereTrace & 0x80000 ){
+            SrcItem *pDim = aFromTabs + pWLoop->iTab;
+            sqlite3DebugPrintf(
+              "Increase SCAN cost of dimension %s(%d) of fact %s(%d) to %d\n",
+              pDim->zAlias ? pDim->zAlias: pDim->pSTab->zName, pWLoop->iTab,
+              pFactTab->zAlias ? pFactTab->zAlias : pFactTab->pSTab->zName,
+              iFromIdx, mxRun
+            );
+          }
+          pWLoop->rStarDelta = mxRun - pWLoop->rRun;
+#endif /* WHERETRACE_ENABLED */
+          pWLoop->rRun = mxRun;
+        }
+      }
+    }
+#ifdef WHERETRACE_ENABLED /* 0x80000 */
+    if( (sqlite3WhereTrace & 0x80000)!=0 && pWInfo->bStarUsed ){
+      sqlite3DebugPrintf("WhereLoops changed by star-query heuristic:\n");
+      for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){
+        if( pWLoop->rStarDelta ){
+          sqlite3WhereLoopPrint(pWLoop, &pWInfo->sWC);
+        }
+      }
+    }
+#endif
+  }
+  return pWInfo->bStarUsed ? 18 : 12;
+}
+
+/*
+** Two WhereLoop objects, pCandidate and pBaseline, are known to have the
+** same cost.  Look deep into each to see if pCandidate is even slightly
+** better than pBaseline.  Return false if it is, that is, if pCandidate
+** is preferred.  Return true if pBaseline is preferred or if we cannot
+** tell the difference.
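+** For example, when two candidate paths tie exactly on cost, a loop that
+** scans a narrower index (one with a smaller szIdxRow) is reported as the
+** better choice, since more of its entries fit on each page; any other
+** tie is left undecided.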
+** +** Result Meaning +** -------- ---------------------------------------------------------- +** true We cannot tell the difference in pCandidate and pBaseline +** false pCandidate seems like a better choice than pBaseline +*/ +static SQLITE_NOINLINE int whereLoopIsNoBetter( + const WhereLoop *pCandidate, + const WhereLoop *pBaseline +){ + if( (pCandidate->wsFlags & WHERE_INDEXED)==0 ) return 1; + if( (pBaseline->wsFlags & WHERE_INDEXED)==0 ) return 1; + if( pCandidate->u.btree.pIndex->szIdxRow < + pBaseline->u.btree.pIndex->szIdxRow ) return 0; + return 1; +} + /* ** Given the list of WhereLoop objects at pWInfo->pLoops, this routine ** attempts to find the lowest cost path that visits each WhereLoop @@ -167348,7 +169079,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ int mxI = 0; /* Index of next entry to replace */ int nOrderBy; /* Number of ORDER BY clause terms */ LogEst mxCost = 0; /* Maximum cost of a set of paths */ - LogEst mxUnsorted = 0; /* Maximum unsorted cost of a set of path */ + LogEst mxUnsort = 0; /* Maximum unsorted cost of a set of path */ int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */ WherePath *aFrom; /* All nFrom paths at the previous level */ WherePath *aTo; /* The nTo best paths at the current level */ @@ -167362,13 +169093,27 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pParse = pWInfo->pParse; nLoop = pWInfo->nLevel; - /* TUNING: For simple queries, only the best path is tracked. - ** For 2-way joins, the 5 best paths are followed. - ** For joins of 3 or more tables, track the 10 best paths */ - mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); - assert( nLoop<=pWInfo->pTabList->nSrc ); WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d, nQueryLoop=%d)\n", nRowEst, pParse->nQueryLoop)); + /* TUNING: mxChoice is the maximum number of possible paths to preserve + ** at each step. Based on the number of loops in the FROM clause: + ** + ** nLoop mxChoice + ** ----- -------- + ** 1 1 // the most common case + ** 2 5 + ** 3+ 12 or 18 // see computeMxChoice() + */ + if( nLoop<=1 ){ + mxChoice = 1; + }else if( nLoop==2 ){ + mxChoice = 5; + }else if( pParse->nErr ){ + mxChoice = 1; + }else{ + mxChoice = computeMxChoice(pWInfo); + } + assert( nLoop<=pWInfo->pTabList->nSrc ); /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this ** case the purpose of this call is to estimate the number of rows returned @@ -167433,7 +169178,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ LogEst nOut; /* Rows visited by (pFrom+pWLoop) */ LogEst rCost; /* Cost of path (pFrom+pWLoop) */ - LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */ + LogEst rUnsort; /* Unsorted cost of (pFrom+pWLoop) */ i8 isOrdered; /* isOrdered for (pFrom+pWLoop) */ Bitmask maskNew; /* Mask of src visited by (..) */ Bitmask revMask; /* Mask of rev-order loops for (..) */ @@ -167451,8 +169196,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ /* At this point, pWLoop is a candidate to be the next loop. 
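      **
      ** (LogEst arithmetic, for reference: a LogEst is roughly 10*log2(x),
      ** so multiplying row counts becomes addition.  A loop with rRun=33
      ** (~10 steps per row) entered with pFrom->nRow=33 (~10 rows) costs
      ** 33+33=66, about 100 steps; folding in an equal-cost setup term via
      ** sqlite3LogEstAdd(66,66) yields 76, about 200.)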
** Compute its cost */ - rUnsorted = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow); - rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted); + rUnsort = pWLoop->rRun + pFrom->nRow; + if( pWLoop->rSetup ){ + rUnsort = sqlite3LogEstAdd(pWLoop->rSetup, rUnsort); + } + rUnsort = sqlite3LogEstAdd(rUnsort, pFrom->rUnsort); nOut = pFrom->nRow + pWLoop->nOut; maskNew = pFrom->maskLoop | pWLoop->maskSelf; isOrdered = pFrom->isOrdered; @@ -167474,15 +169222,15 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** extra encouragement to the query planner to select a plan ** where the rows emerge in the correct order without any sorting ** required. */ - rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3; + rCost = sqlite3LogEstAdd(rUnsort, aSortCost[isOrdered]) + 3; WHERETRACE(0x002, ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n", aSortCost[isOrdered], (nOrderBy-isOrdered), nOrderBy, - rUnsorted, rCost)); + rUnsort, rCost)); }else{ - rCost = rUnsorted; - rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */ + rCost = rUnsort; + rUnsort -= 2; /* TUNING: Slight bias in favor of no-sort plans */ } /* Check to see if pWLoop should be added to the set of @@ -167496,6 +169244,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** to (pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range ** of legal values for isOrdered, -1..64. */ + testcase( nTo==0 ); for(jj=0, pTo=aTo; jj<nTo; jj++, pTo++){ if( pTo->maskLoop==maskNew && ((pTo->isOrdered^isOrdered)&0x80)==0 @@ -167507,7 +169256,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( jj>=nTo ){ /* None of the existing best-so-far paths match the candidate. */ if( nTo>=mxChoice - && (rCost>mxCost || (rCost==mxCost && rUnsorted>=mxUnsorted)) + && (rCost>mxCost || (rCost==mxCost && rUnsort>=mxUnsort)) ){ /* The current candidate is no better than any of the mxChoice ** paths currently in the best-so-far buffer. So discard @@ -167515,7 +169264,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf("Skip %s cost=%-3d,%3d,%3d order=%c\n", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); } #endif @@ -167534,7 +169283,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf("New %s cost=%-3d,%3d,%3d order=%c\n", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); } #endif @@ -167545,24 +169294,23 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ ** pTo or if the candidate should be skipped.
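** For example, if an existing path pTo has (rCost,nRow,rUnsort)==(50,40,49) ** and the candidate arrives at (50,40,48), the first two components tie, ** the candidate's lower unsorted cost wins, and the candidate replaces pTo.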
** ** The conditional is an expanded vector comparison equivalent to: - ** (pTo->rCost,pTo->nRow,pTo->rUnsorted) <= (rCost,nOut,rUnsorted) + ** (pTo->rCost,pTo->nRow,pTo->rUnsort) <= (rCost,nOut,rUnsort) */ - if( pTo->rCost<rCost - || (pTo->rCost==rCost - && (pTo->nRow<nOut - || (pTo->nRow==nOut && pTo->rUnsorted<=rUnsorted) - ) - ) + if( (pTo->rCost<rCost) + || (pTo->rCost==rCost && pTo->nRow<nOut) + || (pTo->rCost==rCost && pTo->nRow==nOut && pTo->rUnsort<rUnsort) + || (pTo->rCost==rCost && pTo->nRow==nOut && pTo->rUnsort==rUnsort + && whereLoopIsNoBetter(pWLoop, pTo->aLoop[iLoop]) ) ){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Skip %s cost=%-3d,%3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" vs %s cost=%-3d,%3d,%3d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); + pTo->rUnsort, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif /* Discard the candidate path from further consideration */ @@ -167576,11 +169324,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Update %s cost=%-3d,%3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsorted, + wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, rUnsort, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" was %s cost=%-3d,%3d,%3d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->rUnsorted, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); + pTo->rUnsort, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif } @@ -167589,20 +169337,20 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pTo->revLoop = revMask; pTo->nRow = nOut; pTo->rCost = rCost; - pTo->rUnsorted = rUnsorted; + pTo->rUnsort = rUnsort; pTo->isOrdered = isOrdered; memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop); pTo->aLoop[iLoop] = pWLoop; if( nTo>=mxChoice ){ mxI = 0; mxCost = aTo[0].rCost; - mxUnsorted = aTo[0].nRow; + mxUnsort = aTo[0].nRow; for(jj=1, pTo=&aTo[1]; jj<nTo; jj++, pTo++){ if( pTo->rCost>mxCost - || (pTo->rCost==mxCost && pTo->rUnsorted>mxUnsorted) + || (pTo->rCost==mxCost && pTo->rUnsort>mxUnsort) ){ mxCost = pTo->rCost; - mxUnsorted = pTo->rUnsorted; + mxUnsort = pTo->rUnsort; mxI = jj; } } @@ -167612,17 +169360,32 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ #ifdef WHERETRACE_ENABLED /* >=2 */ if( sqlite3WhereTrace & 0x02 ){ + LogEst rMin, rFloor = 0; + int nDone = 0; + int nProgress; sqlite3DebugPrintf("---- after round %d ----\n", iLoop); - for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){ - sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c", - wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?'); - if( pTo->isOrdered>0 ){ - sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop); - }else{ - sqlite3DebugPrintf("\n"); + do{ + nProgress = 0; + rMin = 0x7fff; + for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){ + if( pTo->rCost>rFloor && pTo->rCost<rMin ) rMin = pTo->rCost; + } + for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){ + if( pTo->rCost==rMin ){ + sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c", + wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, + pTo->isOrdered>=0 ?
(pTo->isOrdered+'0') : '?'); + if( pTo->isOrdered>0 ){ + sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop); + }else{ + sqlite3DebugPrintf("\n"); + } + nDone++; + nProgress++; + } } - } + rFloor = rMin; + }while( nDone<nTo && nProgress>0 ); } #endif @@ -167717,6 +169480,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ } pWInfo->nRowOut = pFrom->nRow; +#ifdef WHERETRACE_ENABLED + pWInfo->rTotalCost = pFrom->rCost; +#endif /* Free temporary memory and return success */ sqlite3StackFreeNN(pParse->db, pSpace); @@ -167827,7 +169593,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ if( pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE ) return 0; assert( pWInfo->pTabList->nSrc>=1 ); pItem = pWInfo->pTabList->a; - pTab = pItem->pTab; + pTab = pItem->pSTab; if( IsVirtual(pTab) ) return 0; if( pItem->fg.isIndexedBy || pItem->fg.notIndexed ){ testcase( pItem->fg.isIndexedBy ); @@ -168017,6 +169783,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( WhereTerm *pTerm, *pEnd; SrcItem *pItem; WhereLoop *pLoop; + Bitmask m1; pLoop = pWInfo->a[i].pWLoop; pItem = &pWInfo->pTabList->a[pLoop->iTab]; if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ) continue; @@ -168037,13 +169804,16 @@ } if( hasRightJoin && ExprHasProperty(pTerm->pExpr, EP_InnerON) - && pTerm->pExpr->w.iJoin==pItem->iCursor + && NEVER(pTerm->pExpr->w.iJoin==pItem->iCursor) ){ break; /* restriction (5) */ } } if( pTerm<pEnd ) continue; - WHERETRACE(0xffffffff, ("-> drop loop %c not used\n", pLoop->cId)); + WHERETRACE(0xffffffff,("-> omit unused FROM-clause term %c\n",pLoop->cId)); + m1 = MASKBIT(i)-1; + testcase( ((pWInfo->revMask>>1) & ~m1)!=0 ); + pWInfo->revMask = (m1 & pWInfo->revMask) | ((pWInfo->revMask>>1) & ~m1); notReady &= ~pLoop->maskSelf; for(pTerm=pWInfo->sWC.a; pTerm<pEnd; pTerm++){ if( (pTerm->prereqAll & pLoop->maskSelf)!=0 ){ @@ -168090,7 +169860,7 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( WhereLoop *pLoop = pWInfo->a[i].pWLoop; const unsigned int reqFlags = (WHERE_SELFCULL|WHERE_COLUMN_EQ); SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; - Table *pTab = pItem->pTab; + Table *pTab = pItem->pSTab; if( (pTab->tabFlags & TF_HasStat1)==0 ) break; pTab->tabFlags |= TF_MaybeReanalyze; if( i>=1 @@ -168113,58 +169883,6 @@ } } -/* -** Expression Node callback for sqlite3ExprCanReturnSubtype(). -** -** Only a function call is able to return a subtype. So if the node -** is not a function call, return WRC_Prune immediately. -** -** A function call is able to return a subtype if it has the -** SQLITE_RESULT_SUBTYPE property. -** -** Assume that every function is able to pass-through a subtype from -** one of its argument (using sqlite3_result_value()). Most functions -** are not this way, but we don't have a mechanism to distinguish those -** that are from those that are not, so assume they all work this way. -** That means that if one of its arguments is another function and that -** other function is able to return a subtype, then this function is -** able to return a subtype. -*/ -static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){ - int n; - FuncDef *pDef; - sqlite3 *db; - if( pExpr->op!=TK_FUNCTION ){ - return WRC_Prune; - } - assert( ExprUseXList(pExpr) ); - db = pWalker->pParse->db; - n = pExpr->x.pList ?
pExpr->x.pList->nExpr : 0; - pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); - if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ - pWalker->eCode = 1; - return WRC_Prune; - } - return WRC_Continue; -} - -/* -** Return TRUE if expression pExpr is able to return a subtype. -** -** A TRUE return does not guarantee that a subtype will be returned. -** It only indicates that a subtype return is possible. False positives -** are acceptable as they only disable an optimization. False negatives, -** on the other hand, can lead to incorrect answers. -*/ -static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){ - Walker w; - memset(&w, 0, sizeof(w)); - w.pParse = pParse; - w.xExprCallback = exprNodeCanReturnSubtype; - sqlite3WalkExpr(&w, pExpr); - return w.eCode; -} - /* ** The index pIdx is used by a query and contains one or more expressions. ** In other words pIdx is an index on an expression. iIdxCur is the cursor @@ -168198,12 +169916,6 @@ static SQLITE_NOINLINE void whereAddIndexedExpr( continue; } if( sqlite3ExprIsConstant(0,pExpr) ) continue; - if( pExpr->op==TK_FUNCTION && sqlite3ExprCanReturnSubtype(pParse,pExpr) ){ - /* Functions that might set a subtype should not be replaced by the - ** value taken from an expression index since the index omits the - ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ - continue; - } p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); if( p==0 ) break; p->pIENext = pParse->pIdxEpr; @@ -168246,8 +169958,8 @@ static SQLITE_NOINLINE void whereReverseScanOrder(WhereInfo *pWInfo){ SrcItem *pItem = &pWInfo->pTabList->a[ii]; if( !pItem->fg.isCte || pItem->u2.pCteUse->eM10d!=M10d_Yes - || NEVER(pItem->pSelect==0) - || pItem->pSelect->pOrderBy==0 + || NEVER(pItem->fg.isSubquery==0) + || pItem->u4.pSubq->pSelect->pOrderBy==0 ){ pWInfo->revMask |= MASKBIT(ii); } @@ -168626,7 +170338,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( db->mallocFailed ) goto whereBeginError; if( pWInfo->pOrderBy ){ whereInterstageHeuristic(pWInfo); - wherePathSolver(pWInfo, pWInfo->nRowOut+1); + wherePathSolver(pWInfo, pWInfo->nRowOut<0 ? 1 : pWInfo->nRowOut+1); if( db->mallocFailed ) goto whereBeginError; } @@ -168650,7 +170362,8 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( assert( db->mallocFailed==0 ); #ifdef WHERETRACE_ENABLED if( sqlite3WhereTrace ){ - sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut); + sqlite3DebugPrintf("---- Solution cost=%d, nRow=%d", + pWInfo->rTotalCost, pWInfo->nRowOut); if( pWInfo->nOBSat>0 ){ sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask); } @@ -168737,15 +170450,15 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 ){ int wsFlags = pWInfo->a[0].pWLoop->wsFlags; int bOnerow = (wsFlags & WHERE_ONEROW)!=0; - assert( !(wsFlags & WHERE_VIRTUALTABLE) || IsVirtual(pTabList->a[0].pTab) ); + assert( !(wsFlags&WHERE_VIRTUALTABLE) || IsVirtual(pTabList->a[0].pSTab) ); if( bOnerow || ( 0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW) - && !IsVirtual(pTabList->a[0].pTab) + && !IsVirtual(pTabList->a[0].pSTab) && (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK)) && OptimizationEnabled(db, SQLITE_OnePass) )){ pWInfo->eOnePass = bOnerow ? 
ONEPASS_SINGLE : ONEPASS_MULTI; - if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){ + if( HasRowid(pTabList->a[0].pSTab) && (wsFlags & WHERE_IDX_ONLY) ){ if( wctrlFlags & WHERE_ONEPASS_MULTIROW ){ bFordelete = OPFLAG_FORDELETE; } @@ -168763,7 +170476,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( SrcItem *pTabItem; pTabItem = &pTabList->a[pLevel->iFrom]; - pTab = pTabItem->pTab; + pTab = pTabItem->pSTab; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); pLoop = pLevel->pWLoop; if( (pTab->tabFlags & TF_Ephemeral)!=0 || IsView(pTab) ){ @@ -168834,7 +170547,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( iIndexCur = pLevel->iTabCur; op = 0; }else if( pWInfo->eOnePass!=ONEPASS_OFF ){ - Index *pJ = pTabItem->pTab->pIndex; + Index *pJ = pTabItem->pSTab->pIndex; iIndexCur = iAuxArg; assert( wctrlFlags & WHERE_ONEPASS_DESIRED ); while( ALWAYS(pJ) && pJ!=pIx ){ @@ -168901,7 +170614,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( sqlite3VdbeAddOp2(v, OP_Blob, 65536, pRJ->regBloom); pRJ->regReturn = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Null, 0, pRJ->regReturn); - assert( pTab==pTabItem->pTab ); + assert( pTab==pTabItem->pSTab ); if( HasRowid(pTab) ){ KeyInfo *pInfo; sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRJ->iMatch, 1); @@ -168940,13 +170653,18 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( wsFlags = pLevel->pWLoop->wsFlags; pSrc = &pTabList->a[pLevel->iFrom]; if( pSrc->fg.isMaterialized ){ - if( pSrc->fg.isCorrelated ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub); + Subquery *pSubq; + int iOnce = 0; + assert( pSrc->fg.isSubquery ); + pSubq = pSrc->u4.pSubq; + if( pSrc->fg.isCorrelated==0 ){ + iOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); }else{ - int iOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub); - sqlite3VdbeJumpHere(v, iOnce); + iOnce = 0; } + sqlite3VdbeAddOp2(v, OP_Gosub, pSubq->regReturn, pSubq->addrFillSub); + VdbeComment((v, "materialize %!S", pSrc)); + if( iOnce ) sqlite3VdbeJumpHere(v, iOnce); } assert( pTabList == pWInfo->pTabList ); if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){ @@ -169006,6 +170724,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ){ if( (db->flags & SQLITE_VdbeAddopTrace)==0 ) return; sqlite3VdbePrintOp(0, pc, pOp); + sqlite3ShowWhereTerm(0); /* So compiler won't complain about unused func */ } #endif @@ -169159,9 +170878,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ assert( pLevel->iTabCur==pSrc->iCursor ); if( pSrc->fg.viaCoroutine ){ int m, n; - n = pSrc->regResult; - assert( pSrc->pTab!=0 ); - m = pSrc->pTab->nCol; + assert( pSrc->fg.isSubquery ); + n = pSrc->u4.pSubq->regResult; + assert( pSrc->pSTab!=0 ); + m = pSrc->pSTab->nCol; sqlite3VdbeAddOp3(v, OP_Null, 0, n, n+m-1); } sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur); @@ -169185,7 +170905,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ sqlite3VdbeJumpHere(v, addr); } VdbeModuleComment((v, "End WHERE-loop%d: %s", i, - pWInfo->pTabList->a[pLevel->iFrom].pTab->zName)); + pWInfo->pTabList->a[pLevel->iFrom].pSTab->zName)); } assert( pWInfo->nLevel<=pTabList->nSrc ); @@ -169194,7 +170914,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ VdbeOp *pOp, *pLastOp; Index *pIdx = 0; SrcItem *pTabItem = &pTabList->a[pLevel->iFrom]; - Table *pTab = pTabItem->pTab; + Table *pTab = pTabItem->pSTab; assert( pTab!=0 ); pLoop = pLevel->pWLoop; @@ -169213,9 +170933,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ */ if( 
pTabItem->fg.viaCoroutine ){ testcase( pParse->db->mallocFailed ); - assert( pTabItem->regResult>=0 ); + assert( pTabItem->fg.isSubquery ); + assert( pTabItem->u4.pSubq->regResult>=0 ); translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur, - pTabItem->regResult, 0); + pTabItem->u4.pSubq->regResult, 0); continue; } @@ -169303,14 +171024,28 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ pOp->p2 = x; pOp->p1 = pLevel->iIdxCur; OpcodeRewriteTrace(db, k, pOp); - }else{ - /* Unable to translate the table reference into an index - ** reference. Verify that this is harmless - that the - ** table being referenced really is open. - */ + }else if( pLoop->wsFlags & (WHERE_IDX_ONLY|WHERE_EXPRIDX) ){ if( pLoop->wsFlags & WHERE_IDX_ONLY ){ + /* An error. pLoop is supposed to be a covering index loop, + ** and yet the VM code refers to a column of the table that + ** is not part of the index. */ sqlite3ErrorMsg(pParse, "internal query planner error"); pParse->rc = SQLITE_INTERNAL; + }else{ + /* The WHERE_EXPRIDX flag is set by the planner when it is likely + ** that pLoop is a covering index loop, but it is not possible + ** to be 100% sure. In this case, any OP_Explain opcode + ** corresponding to this loop describes the index as a "COVERING + ** INDEX". But, pOp proves that pLoop is not actually a covering + ** index loop. So clear the WHERE_EXPRIDX flag and rewrite the + ** text that accompanies the OP_Explain opcode, if any. */ + pLoop->wsFlags &= ~WHERE_EXPRIDX; + sqlite3WhereAddExplainText(pParse, + pLevel->addrBody-1, + pTabList, + pLevel, + pWInfo->wctrlFlags + ); } } }else if( pOp->opcode==OP_Rowid ){ @@ -170257,7 +171992,7 @@ static ExprList *exprListAppendList( int iDummy; Expr *pSub; pSub = sqlite3ExprSkipCollateAndLikely(pDup); - if( sqlite3ExprIsInteger(pSub, &iDummy) ){ + if( sqlite3ExprIsInteger(pSub, &iDummy, 0) ){ pSub->op = TK_NULL; pSub->flags &= ~(EP_IntValue|EP_IsTrue|EP_IsFalse); pSub->u.zToken = 0; @@ -170425,9 +172160,10 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ assert( pSub!=0 || p->pSrc==0 ); /* Due to db->mallocFailed test inside ** of sqlite3DbMallocRawNN() called from ** sqlite3SrcListAppend() */ - if( p->pSrc ){ + if( p->pSrc==0 ){ + sqlite3SelectDelete(db, pSub); + }else if( sqlite3SrcItemAttachSubquery(pParse, &p->pSrc->a[0], pSub, 0) ){ Table *pTab2; - p->pSrc->a[0].pSelect = pSub; p->pSrc->a[0].fg.isCorrelated = 1; sqlite3SrcListAssignCursors(pParse, p->pSrc); pSub->selFlags |= SF_Expanded|SF_OrderByReqd; @@ -170441,7 +172177,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ }else{ memcpy(pTab, pTab2, sizeof(Table)); pTab->tabFlags |= TF_Ephemeral; - p->pSrc->a[0].pTab = pTab; + p->pSrc->a[0].pSTab = pTab; pTab = pTab2; memset(&w, 0, sizeof(w)); w.xExprCallback = sqlite3WindowExtraAggFuncDepth; @@ -170449,8 +172185,6 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ w.xSelectCallback2 = sqlite3WalkerDepthDecrease; sqlite3WalkSelect(&w, pSub); } - }else{ - sqlite3SelectDelete(db, pSub); } if( db->mallocFailed ) rc = SQLITE_NOMEM; @@ -170737,10 +172471,15 @@ SQLITE_PRIVATE int sqlite3WindowCompare( ** and initialize registers and cursors used by sqlite3WindowCodeStep(). 
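** (sqlite3WindowRewrite() has already attached the rewritten sub-select as ** the only FROM-clause entry, which is what makes the isSubquery assertion ** at the top of this function safe.)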
*/ SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Select *pSelect){ - int nEphExpr = pSelect->pSrc->a[0].pSelect->pEList->nExpr; - Window *pMWin = pSelect->pWin; Window *pWin; - Vdbe *v = sqlite3GetVdbe(pParse); + int nEphExpr; + Window *pMWin; + Vdbe *v; + + assert( pSelect->pSrc->a[0].fg.isSubquery ); + nEphExpr = pSelect->pSrc->a[0].u4.pSubq->pSelect->pEList->nExpr; + pMWin = pSelect->pWin; + v = sqlite3GetVdbe(pParse); sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pMWin->iEphCsr, nEphExpr); sqlite3VdbeAddOp2(v, OP_OpenDup, pMWin->iEphCsr+1, pMWin->iEphCsr); @@ -171014,6 +172753,7 @@ static void windowAggStep( int regArg; int nArg = pWin->bExprArgs ? 0 : windowArgCount(pWin); int i; + int addrIf = 0; assert( bInverse==0 || pWin->eStart!=TK_UNBOUNDED ); @@ -171030,6 +172770,18 @@ static void windowAggStep( } regArg = reg; + if( pWin->pFilter ){ + int regTmp; + assert( ExprUseXList(pWin->pOwner) ); + assert( pWin->bExprArgs || !nArg ||nArg==pWin->pOwner->x.pList->nExpr ); + assert( pWin->bExprArgs || nArg ||pWin->pOwner->x.pList==0 ); + regTmp = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp); + addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1); + VdbeCoverage(v); + sqlite3ReleaseTempReg(pParse, regTmp); + } + if( pMWin->regStartRowid==0 && (pFunc->funcFlags & SQLITE_FUNC_MINMAX) && (pWin->eStart!=TK_UNBOUNDED) @@ -171049,25 +172801,13 @@ static void windowAggStep( } sqlite3VdbeJumpHere(v, addrIsNull); }else if( pWin->regApp ){ + assert( pWin->pFilter==0 ); assert( pFunc->zName==nth_valueName || pFunc->zName==first_valueName ); assert( bInverse==0 || bInverse==1 ); sqlite3VdbeAddOp2(v, OP_AddImm, pWin->regApp+1-bInverse, 1); }else if( pFunc->xSFunc!=noopStepFunc ){ - int addrIf = 0; - if( pWin->pFilter ){ - int regTmp; - assert( ExprUseXList(pWin->pOwner) ); - assert( pWin->bExprArgs || !nArg ||nArg==pWin->pOwner->x.pList->nExpr ); - assert( pWin->bExprArgs || nArg ||pWin->pOwner->x.pList==0 ); - regTmp = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp); - addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1); - VdbeCoverage(v); - sqlite3ReleaseTempReg(pParse, regTmp); - } - if( pWin->bExprArgs ){ int iOp = sqlite3VdbeCurrentAddr(v); int iEnd; @@ -171094,12 +172834,13 @@ static void windowAggStep( sqlite3VdbeAddOp3(v, bInverse? OP_AggInverse : OP_AggStep, bInverse, regArg, pWin->regAccum); sqlite3VdbeAppendP4(v, pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeChangeP5(v, (u16)nArg); if( pWin->bExprArgs ){ sqlite3ReleaseTempRange(pParse, regArg, nArg); } - if( addrIf ) sqlite3VdbeJumpHere(v, addrIf); } + + if( addrIf ) sqlite3VdbeJumpHere(v, addrIf); } } @@ -172137,7 +173878,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( Vdbe *v = sqlite3GetVdbe(pParse); int csrWrite; /* Cursor used to write to eph. 
table */ int csrInput = p->pSrc->a[0].iCursor; /* Cursor of sub-select */ - int nInput = p->pSrc->a[0].pTab->nCol; /* Number of cols returned by sub */ + int nInput = p->pSrc->a[0].pSTab->nCol; /* Number of cols returned by sub */ int iInput; /* To iterate through sub cols */ int addrNe; /* Address of OP_Ne */ int addrGosubFlush = 0; /* Address of OP_Gosub to flush: */ @@ -172526,6 +174267,13 @@ struct TrigEvent { int a; IdList * b; }; struct FrameBound { int eType; Expr *pExpr; }; +/* +** Generate a syntax error +*/ +static void parserSyntaxError(Parse *pParse, Token *p){ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", p); +} + /* ** Disable lookaside memory allocation for objects that might be ** shared across database connections. @@ -172734,132 +174482,132 @@ static void updateDeleteLimitError( #define TK_OR 43 #define TK_AND 44 #define TK_IS 45 -#define TK_MATCH 46 -#define TK_LIKE_KW 47 -#define TK_BETWEEN 48 -#define TK_IN 49 -#define TK_ISNULL 50 -#define TK_NOTNULL 51 -#define TK_NE 52 -#define TK_EQ 53 -#define TK_GT 54 -#define TK_LE 55 -#define TK_LT 56 -#define TK_GE 57 -#define TK_ESCAPE 58 -#define TK_ID 59 -#define TK_COLUMNKW 60 -#define TK_DO 61 -#define TK_FOR 62 -#define TK_IGNORE 63 -#define TK_INITIALLY 64 -#define TK_INSTEAD 65 -#define TK_NO 66 -#define TK_KEY 67 -#define TK_OF 68 -#define TK_OFFSET 69 -#define TK_PRAGMA 70 -#define TK_RAISE 71 -#define TK_RECURSIVE 72 -#define TK_REPLACE 73 -#define TK_RESTRICT 74 -#define TK_ROW 75 -#define TK_ROWS 76 -#define TK_TRIGGER 77 -#define TK_VACUUM 78 -#define TK_VIEW 79 -#define TK_VIRTUAL 80 -#define TK_WITH 81 -#define TK_NULLS 82 -#define TK_FIRST 83 -#define TK_LAST 84 -#define TK_CURRENT 85 -#define TK_FOLLOWING 86 -#define TK_PARTITION 87 -#define TK_PRECEDING 88 -#define TK_RANGE 89 -#define TK_UNBOUNDED 90 -#define TK_EXCLUDE 91 -#define TK_GROUPS 92 -#define TK_OTHERS 93 -#define TK_TIES 94 -#define TK_GENERATED 95 -#define TK_ALWAYS 96 -#define TK_MATERIALIZED 97 -#define TK_REINDEX 98 -#define TK_RENAME 99 -#define TK_CTIME_KW 100 -#define TK_ANY 101 -#define TK_BITAND 102 -#define TK_BITOR 103 -#define TK_LSHIFT 104 -#define TK_RSHIFT 105 -#define TK_PLUS 106 -#define TK_MINUS 107 -#define TK_STAR 108 -#define TK_SLASH 109 -#define TK_REM 110 -#define TK_CONCAT 111 -#define TK_PTR 112 -#define TK_COLLATE 113 -#define TK_BITNOT 114 -#define TK_ON 115 -#define TK_INDEXED 116 -#define TK_STRING 117 -#define TK_JOIN_KW 118 -#define TK_CONSTRAINT 119 -#define TK_DEFAULT 120 -#define TK_NULL 121 -#define TK_PRIMARY 122 -#define TK_UNIQUE 123 -#define TK_CHECK 124 -#define TK_REFERENCES 125 -#define TK_AUTOINCR 126 -#define TK_INSERT 127 -#define TK_DELETE 128 -#define TK_UPDATE 129 -#define TK_SET 130 -#define TK_DEFERRABLE 131 -#define TK_FOREIGN 132 -#define TK_DROP 133 -#define TK_UNION 134 -#define TK_ALL 135 -#define TK_EXCEPT 136 -#define TK_INTERSECT 137 -#define TK_SELECT 138 -#define TK_VALUES 139 -#define TK_DISTINCT 140 -#define TK_DOT 141 -#define TK_FROM 142 -#define TK_JOIN 143 -#define TK_USING 144 -#define TK_ORDER 145 -#define TK_GROUP 146 -#define TK_HAVING 147 -#define TK_LIMIT 148 -#define TK_WHERE 149 -#define TK_RETURNING 150 -#define TK_INTO 151 -#define TK_NOTHING 152 -#define TK_FLOAT 153 -#define TK_BLOB 154 -#define TK_INTEGER 155 -#define TK_VARIABLE 156 -#define TK_CASE 157 -#define TK_WHEN 158 -#define TK_THEN 159 -#define TK_ELSE 160 -#define TK_INDEX 161 -#define TK_ALTER 162 -#define TK_ADD 163 -#define TK_WINDOW 164 -#define TK_OVER 165 -#define TK_FILTER 166 -#define 
TK_COLUMN 167 -#define TK_AGG_FUNCTION 168 -#define TK_AGG_COLUMN 169 -#define TK_TRUEFALSE 170 -#define TK_ISNOT 171 +#define TK_ISNOT 46 +#define TK_MATCH 47 +#define TK_LIKE_KW 48 +#define TK_BETWEEN 49 +#define TK_IN 50 +#define TK_ISNULL 51 +#define TK_NOTNULL 52 +#define TK_NE 53 +#define TK_EQ 54 +#define TK_GT 55 +#define TK_LE 56 +#define TK_LT 57 +#define TK_GE 58 +#define TK_ESCAPE 59 +#define TK_ID 60 +#define TK_COLUMNKW 61 +#define TK_DO 62 +#define TK_FOR 63 +#define TK_IGNORE 64 +#define TK_INITIALLY 65 +#define TK_INSTEAD 66 +#define TK_NO 67 +#define TK_KEY 68 +#define TK_OF 69 +#define TK_OFFSET 70 +#define TK_PRAGMA 71 +#define TK_RAISE 72 +#define TK_RECURSIVE 73 +#define TK_REPLACE 74 +#define TK_RESTRICT 75 +#define TK_ROW 76 +#define TK_ROWS 77 +#define TK_TRIGGER 78 +#define TK_VACUUM 79 +#define TK_VIEW 80 +#define TK_VIRTUAL 81 +#define TK_WITH 82 +#define TK_NULLS 83 +#define TK_FIRST 84 +#define TK_LAST 85 +#define TK_CURRENT 86 +#define TK_FOLLOWING 87 +#define TK_PARTITION 88 +#define TK_PRECEDING 89 +#define TK_RANGE 90 +#define TK_UNBOUNDED 91 +#define TK_EXCLUDE 92 +#define TK_GROUPS 93 +#define TK_OTHERS 94 +#define TK_TIES 95 +#define TK_GENERATED 96 +#define TK_ALWAYS 97 +#define TK_MATERIALIZED 98 +#define TK_REINDEX 99 +#define TK_RENAME 100 +#define TK_CTIME_KW 101 +#define TK_ANY 102 +#define TK_BITAND 103 +#define TK_BITOR 104 +#define TK_LSHIFT 105 +#define TK_RSHIFT 106 +#define TK_PLUS 107 +#define TK_MINUS 108 +#define TK_STAR 109 +#define TK_SLASH 110 +#define TK_REM 111 +#define TK_CONCAT 112 +#define TK_PTR 113 +#define TK_COLLATE 114 +#define TK_BITNOT 115 +#define TK_ON 116 +#define TK_INDEXED 117 +#define TK_STRING 118 +#define TK_JOIN_KW 119 +#define TK_CONSTRAINT 120 +#define TK_DEFAULT 121 +#define TK_NULL 122 +#define TK_PRIMARY 123 +#define TK_UNIQUE 124 +#define TK_CHECK 125 +#define TK_REFERENCES 126 +#define TK_AUTOINCR 127 +#define TK_INSERT 128 +#define TK_DELETE 129 +#define TK_UPDATE 130 +#define TK_SET 131 +#define TK_DEFERRABLE 132 +#define TK_FOREIGN 133 +#define TK_DROP 134 +#define TK_UNION 135 +#define TK_ALL 136 +#define TK_EXCEPT 137 +#define TK_INTERSECT 138 +#define TK_SELECT 139 +#define TK_VALUES 140 +#define TK_DISTINCT 141 +#define TK_DOT 142 +#define TK_FROM 143 +#define TK_JOIN 144 +#define TK_USING 145 +#define TK_ORDER 146 +#define TK_GROUP 147 +#define TK_HAVING 148 +#define TK_LIMIT 149 +#define TK_WHERE 150 +#define TK_RETURNING 151 +#define TK_INTO 152 +#define TK_NOTHING 153 +#define TK_FLOAT 154 +#define TK_BLOB 155 +#define TK_INTEGER 156 +#define TK_VARIABLE 157 +#define TK_CASE 158 +#define TK_WHEN 159 +#define TK_THEN 160 +#define TK_ELSE 161 +#define TK_INDEX 162 +#define TK_ALTER 163 +#define TK_ADD 164 +#define TK_WINDOW 165 +#define TK_OVER 166 +#define TK_FILTER 167 +#define TK_COLUMN 168 +#define TK_AGG_FUNCTION 169 +#define TK_AGG_COLUMN 170 +#define TK_TRUEFALSE 171 #define TK_FUNCTION 172 #define TK_UPLUS 173 #define TK_UMINUS 174 @@ -172873,7 +174621,8 @@ static void updateDeleteLimitError( #define TK_ERROR 182 #define TK_QNUMBER 183 #define TK_SPACE 184 -#define TK_ILLEGAL 185 +#define TK_COMMENT 185 +#define TK_ILLEGAL 186 #endif /**************** End token definitions ***************************************/ @@ -172938,31 +174687,31 @@ static void updateDeleteLimitError( #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 322 +#define YYNOCODE 323 #define YYACTIONTYPE unsigned short int -#define 
YYWILDCARD 101 +#define YYWILDCARD 102 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - ExprList* yy14; - With* yy59; - Cte* yy67; - Upsert* yy122; - IdList* yy132; - int yy144; - const char* yy168; - SrcList* yy203; - Window* yy211; - OnOrUsing yy269; - struct TrigEvent yy286; - struct {int value; int mask;} yy383; - u32 yy391; - TriggerStep* yy427; - Expr* yy454; - u8 yy462; - struct FrameBound yy509; - Select* yy555; + u32 yy9; + struct TrigEvent yy28; + With* yy125; + IdList* yy204; + struct FrameBound yy205; + TriggerStep* yy319; + const char* yy342; + Cte* yy361; + ExprList* yy402; + Upsert* yy403; + OnOrUsing yy421; + u8 yy444; + struct {int value; int mask;} yy481; + Window* yy483; + int yy502; + SrcList* yy563; + Expr* yy590; + Select* yy637; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -172984,7 +174733,7 @@ typedef union { #define YYNSTATE 583 #define YYNRULE 409 #define YYNRULE_WITH_ACTION 344 -#define YYNTOKEN 186 +#define YYNTOKEN 187 #define YY_MAX_SHIFT 582 #define YY_MIN_SHIFTREDUCE 845 #define YY_MAX_SHIFTREDUCE 1253 @@ -172993,8 +174742,8 @@ typedef union { #define YY_NO_ACTION 1256 #define YY_MIN_REDUCE 1257 #define YY_MAX_REDUCE 1665 -#define YY_MIN_DSTRCTR 205 -#define YY_MAX_DSTRCTR 319 +#define YY_MIN_DSTRCTR 206 +#define YY_MAX_DSTRCTR 320 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -173077,569 +174826,582 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2142) +#define YY_ACTTAB_COUNT (2207) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 576, 128, 125, 232, 1622, 549, 576, 1290, 1281, 576, - /* 10 */ 328, 576, 1300, 212, 576, 128, 125, 232, 578, 412, - /* 20 */ 578, 391, 1542, 51, 51, 523, 405, 1293, 529, 51, - /* 30 */ 51, 983, 51, 51, 81, 81, 1107, 61, 61, 984, - /* 40 */ 1107, 1292, 380, 135, 136, 90, 1228, 1228, 1063, 1066, - /* 50 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 1577, 412, - /* 60 */ 287, 287, 7, 287, 287, 422, 1050, 1050, 1064, 1067, - /* 70 */ 289, 556, 492, 573, 524, 561, 573, 497, 561, 482, - /* 80 */ 530, 262, 229, 135, 136, 90, 1228, 1228, 1063, 1066, - /* 90 */ 1053, 1053, 133, 133, 134, 134, 134, 134, 128, 125, - /* 100 */ 232, 1506, 132, 132, 132, 132, 131, 131, 130, 130, - /* 110 */ 130, 129, 126, 450, 1204, 1255, 1, 1, 582, 2, - /* 120 */ 1259, 1571, 420, 1582, 379, 320, 1174, 153, 1174, 1584, - /* 130 */ 412, 378, 1582, 543, 1341, 330, 111, 570, 570, 570, - /* 140 */ 293, 1054, 132, 132, 132, 132, 131, 131, 130, 130, - /* 150 */ 130, 129, 126, 450, 135, 136, 90, 1228, 1228, 1063, - /* 160 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 287, - /* 170 */ 287, 1204, 1205, 1204, 255, 287, 287, 510, 507, 506, - /* 180 */ 137, 455, 573, 212, 561, 447, 446, 505, 573, 1616, - /* 190 */ 561, 134, 134, 134, 134, 127, 400, 243, 132, 132, - /* 200 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, - /* 210 */ 282, 471, 345, 132, 132, 132, 132, 131, 131, 130, - /* 220 */ 130, 130, 129, 126, 450, 574, 155, 936, 936, 454, - /* 230 */ 227, 521, 1236, 412, 1236, 134, 134, 134, 134, 132, - /* 240 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, - /* 250 */ 450, 130, 130, 130, 129, 126, 450, 135, 136, 90, - /* 260 */ 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, - /* 270 */ 134, 134, 128, 125, 232, 450, 576, 412, 397, 1249, - /* 
280 */ 180, 92, 93, 132, 132, 132, 132, 131, 131, 130, - /* 290 */ 130, 130, 129, 126, 450, 381, 387, 1204, 383, 81, - /* 300 */ 81, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, - /* 310 */ 133, 133, 134, 134, 134, 134, 132, 132, 132, 132, - /* 320 */ 131, 131, 130, 130, 130, 129, 126, 450, 131, 131, - /* 330 */ 130, 130, 130, 129, 126, 450, 556, 1204, 302, 319, - /* 340 */ 567, 121, 568, 480, 4, 555, 1149, 1657, 1628, 1657, - /* 350 */ 45, 128, 125, 232, 1204, 1205, 1204, 1250, 571, 1169, - /* 360 */ 132, 132, 132, 132, 131, 131, 130, 130, 130, 129, - /* 370 */ 126, 450, 1169, 287, 287, 1169, 1019, 576, 422, 1019, - /* 380 */ 412, 451, 1602, 582, 2, 1259, 573, 44, 561, 95, - /* 390 */ 320, 110, 153, 565, 1204, 1205, 1204, 522, 522, 1341, - /* 400 */ 81, 81, 7, 44, 135, 136, 90, 1228, 1228, 1063, - /* 410 */ 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, 295, - /* 420 */ 1149, 1658, 1040, 1658, 1204, 1147, 319, 567, 119, 119, - /* 430 */ 343, 466, 331, 343, 287, 287, 120, 556, 451, 577, - /* 440 */ 451, 1169, 1169, 1028, 319, 567, 438, 573, 210, 561, - /* 450 */ 1339, 1451, 546, 531, 1169, 1169, 1598, 1169, 1169, 416, - /* 460 */ 319, 567, 243, 132, 132, 132, 132, 131, 131, 130, - /* 470 */ 130, 130, 129, 126, 450, 1028, 1028, 1030, 1031, 35, - /* 480 */ 44, 1204, 1205, 1204, 472, 287, 287, 1328, 412, 1307, - /* 490 */ 372, 1595, 359, 225, 454, 1204, 195, 1328, 573, 1147, - /* 500 */ 561, 1333, 1333, 274, 576, 1188, 576, 340, 46, 196, - /* 510 */ 537, 217, 135, 136, 90, 1228, 1228, 1063, 1066, 1053, - /* 520 */ 1053, 133, 133, 134, 134, 134, 134, 19, 19, 19, - /* 530 */ 19, 412, 581, 1204, 1259, 511, 1204, 319, 567, 320, - /* 540 */ 944, 153, 425, 491, 430, 943, 1204, 488, 1341, 1450, - /* 550 */ 532, 1277, 1204, 1205, 1204, 135, 136, 90, 1228, 1228, - /* 560 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 570 */ 575, 132, 132, 132, 132, 131, 131, 130, 130, 130, - /* 580 */ 129, 126, 450, 287, 287, 528, 287, 287, 372, 1595, - /* 590 */ 1204, 1205, 1204, 1204, 1205, 1204, 573, 486, 561, 573, - /* 600 */ 889, 561, 412, 1204, 1205, 1204, 886, 40, 22, 22, - /* 610 */ 220, 243, 525, 1449, 132, 132, 132, 132, 131, 131, - /* 620 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 630 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 640 */ 134, 412, 180, 454, 1204, 879, 255, 287, 287, 510, - /* 650 */ 507, 506, 372, 1595, 1568, 1331, 1331, 576, 889, 505, - /* 660 */ 573, 44, 561, 559, 1207, 135, 136, 90, 1228, 1228, - /* 670 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 680 */ 81, 81, 422, 576, 377, 132, 132, 132, 132, 131, - /* 690 */ 131, 130, 130, 130, 129, 126, 450, 297, 287, 287, - /* 700 */ 460, 1204, 1205, 1204, 1204, 534, 19, 19, 448, 448, - /* 710 */ 448, 573, 412, 561, 230, 436, 1187, 535, 319, 567, - /* 720 */ 363, 432, 1207, 1435, 132, 132, 132, 132, 131, 131, - /* 730 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 740 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 750 */ 134, 412, 211, 949, 1169, 1041, 1110, 1110, 494, 547, - /* 760 */ 547, 1204, 1205, 1204, 7, 539, 1570, 1169, 376, 576, - /* 770 */ 1169, 5, 1204, 486, 3, 135, 136, 90, 1228, 1228, - /* 780 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 790 */ 576, 513, 19, 19, 427, 132, 132, 132, 132, 131, - /* 800 */ 131, 130, 130, 130, 129, 126, 450, 305, 1204, 433, - /* 810 */ 225, 1204, 385, 19, 19, 273, 290, 371, 516, 366, - /* 820 */ 515, 260, 412, 538, 1568, 549, 1024, 362, 437, 1204, - /* 830 */ 1205, 1204, 902, 1552, 132, 132, 
132, 132, 131, 131, - /* 840 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 850 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 860 */ 134, 412, 1435, 514, 1281, 1204, 1205, 1204, 1204, 1205, - /* 870 */ 1204, 903, 48, 342, 1568, 1568, 1279, 1627, 1568, 911, - /* 880 */ 576, 129, 126, 450, 110, 135, 136, 90, 1228, 1228, - /* 890 */ 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, 134, - /* 900 */ 265, 576, 459, 19, 19, 132, 132, 132, 132, 131, - /* 910 */ 131, 130, 130, 130, 129, 126, 450, 1345, 204, 576, - /* 920 */ 459, 458, 50, 47, 19, 19, 49, 434, 1105, 573, - /* 930 */ 497, 561, 412, 428, 108, 1224, 1569, 1554, 376, 205, - /* 940 */ 550, 550, 81, 81, 132, 132, 132, 132, 131, 131, - /* 950 */ 130, 130, 130, 129, 126, 450, 135, 136, 90, 1228, - /* 960 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 970 */ 134, 480, 576, 1204, 576, 1541, 412, 1435, 969, 315, - /* 980 */ 1659, 398, 284, 497, 969, 893, 1569, 1569, 376, 376, - /* 990 */ 1569, 461, 376, 1224, 459, 80, 80, 81, 81, 497, - /* 1000 */ 374, 114, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, - /* 1010 */ 133, 134, 134, 134, 134, 132, 132, 132, 132, 131, - /* 1020 */ 131, 130, 130, 130, 129, 126, 450, 1204, 1505, 576, - /* 1030 */ 1204, 1205, 1204, 1366, 316, 486, 281, 281, 497, 431, - /* 1040 */ 557, 288, 288, 402, 1340, 471, 345, 298, 429, 573, - /* 1050 */ 576, 561, 81, 81, 573, 374, 561, 971, 386, 132, - /* 1060 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, - /* 1070 */ 450, 231, 117, 81, 81, 287, 287, 231, 287, 287, - /* 1080 */ 576, 1511, 576, 1336, 1204, 1205, 1204, 139, 573, 556, - /* 1090 */ 561, 573, 412, 561, 441, 456, 969, 213, 558, 1511, - /* 1100 */ 1513, 1550, 969, 143, 143, 145, 145, 1368, 314, 478, - /* 1110 */ 444, 970, 412, 850, 851, 852, 135, 136, 90, 1228, - /* 1120 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1130 */ 134, 357, 412, 397, 1148, 304, 135, 136, 90, 1228, - /* 1140 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1150 */ 134, 1575, 323, 6, 862, 7, 135, 124, 90, 1228, - /* 1160 */ 1228, 1063, 1066, 1053, 1053, 133, 133, 134, 134, 134, - /* 1170 */ 134, 409, 408, 1511, 212, 132, 132, 132, 132, 131, - /* 1180 */ 131, 130, 130, 130, 129, 126, 450, 411, 118, 1204, - /* 1190 */ 116, 10, 352, 265, 355, 132, 132, 132, 132, 131, - /* 1200 */ 131, 130, 130, 130, 129, 126, 450, 576, 324, 306, - /* 1210 */ 576, 306, 1250, 469, 158, 132, 132, 132, 132, 131, - /* 1220 */ 131, 130, 130, 130, 129, 126, 450, 207, 1224, 1126, - /* 1230 */ 65, 65, 470, 66, 66, 412, 447, 446, 882, 531, - /* 1240 */ 335, 258, 257, 256, 1127, 1233, 1204, 1205, 1204, 327, - /* 1250 */ 1235, 874, 159, 576, 16, 480, 1085, 1040, 1234, 1128, - /* 1260 */ 136, 90, 1228, 1228, 1063, 1066, 1053, 1053, 133, 133, - /* 1270 */ 134, 134, 134, 134, 1029, 576, 81, 81, 1028, 1040, - /* 1280 */ 922, 576, 463, 1236, 576, 1236, 1224, 502, 107, 1435, - /* 1290 */ 923, 6, 576, 410, 1498, 882, 1029, 480, 21, 21, - /* 1300 */ 1028, 332, 1380, 334, 53, 53, 497, 81, 81, 874, - /* 1310 */ 1028, 1028, 1030, 445, 259, 19, 19, 533, 132, 132, - /* 1320 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 450, - /* 1330 */ 551, 301, 1028, 1028, 1030, 107, 532, 545, 121, 568, - /* 1340 */ 1188, 4, 1126, 1576, 449, 576, 462, 7, 1282, 418, - /* 1350 */ 462, 350, 1435, 576, 518, 571, 544, 1127, 121, 568, - /* 1360 */ 442, 4, 1188, 464, 533, 1180, 1223, 9, 67, 67, - /* 1370 */ 487, 576, 1128, 303, 410, 571, 54, 54, 451, 576, - /* 1380 */ 123, 944, 576, 417, 576, 333, 943, 1379, 576, 236, - /* 1390 
*/ 565, 576, 1574, 564, 68, 68, 7, 576, 451, 362, - /* 1400 */ 419, 182, 69, 69, 541, 70, 70, 71, 71, 540, - /* 1410 */ 565, 72, 72, 484, 55, 55, 473, 1180, 296, 1040, - /* 1420 */ 56, 56, 296, 493, 541, 119, 119, 410, 1573, 542, - /* 1430 */ 569, 418, 7, 120, 1244, 451, 577, 451, 465, 1040, - /* 1440 */ 1028, 576, 1557, 552, 476, 119, 119, 527, 259, 121, - /* 1450 */ 568, 240, 4, 120, 576, 451, 577, 451, 576, 477, - /* 1460 */ 1028, 576, 156, 576, 57, 57, 571, 576, 286, 229, - /* 1470 */ 410, 336, 1028, 1028, 1030, 1031, 35, 59, 59, 219, - /* 1480 */ 983, 60, 60, 220, 73, 73, 74, 74, 984, 451, - /* 1490 */ 75, 75, 1028, 1028, 1030, 1031, 35, 96, 216, 291, - /* 1500 */ 552, 565, 1188, 318, 395, 395, 394, 276, 392, 576, - /* 1510 */ 485, 859, 474, 1311, 410, 541, 576, 417, 1530, 1144, - /* 1520 */ 540, 399, 1188, 292, 237, 1153, 326, 38, 23, 576, - /* 1530 */ 1040, 576, 20, 20, 325, 299, 119, 119, 164, 76, - /* 1540 */ 76, 1529, 121, 568, 120, 4, 451, 577, 451, 203, - /* 1550 */ 576, 1028, 141, 141, 142, 142, 576, 322, 39, 571, - /* 1560 */ 341, 1021, 110, 264, 239, 901, 900, 423, 242, 908, - /* 1570 */ 909, 370, 173, 77, 77, 43, 479, 1310, 264, 62, - /* 1580 */ 62, 369, 451, 1028, 1028, 1030, 1031, 35, 1601, 1192, - /* 1590 */ 453, 1092, 238, 291, 565, 163, 1309, 110, 395, 395, - /* 1600 */ 394, 276, 392, 986, 987, 859, 481, 346, 264, 110, - /* 1610 */ 1032, 489, 576, 1188, 503, 1088, 261, 261, 237, 576, - /* 1620 */ 326, 121, 568, 1040, 4, 347, 1376, 413, 325, 119, - /* 1630 */ 119, 948, 319, 567, 351, 78, 78, 120, 571, 451, - /* 1640 */ 577, 451, 79, 79, 1028, 354, 356, 576, 360, 1092, - /* 1650 */ 110, 576, 974, 942, 264, 123, 457, 358, 239, 576, - /* 1660 */ 519, 451, 939, 1104, 123, 1104, 173, 576, 1032, 43, - /* 1670 */ 63, 63, 1324, 565, 168, 168, 1028, 1028, 1030, 1031, - /* 1680 */ 35, 576, 169, 169, 1308, 872, 238, 157, 1589, 576, - /* 1690 */ 86, 86, 365, 89, 568, 375, 4, 1103, 941, 1103, - /* 1700 */ 123, 576, 1040, 1389, 64, 64, 1188, 1434, 119, 119, - /* 1710 */ 571, 576, 82, 82, 563, 576, 120, 165, 451, 577, - /* 1720 */ 451, 413, 1362, 1028, 144, 144, 319, 567, 576, 1374, - /* 1730 */ 562, 498, 279, 451, 83, 83, 1439, 576, 166, 166, - /* 1740 */ 576, 1289, 554, 576, 1280, 565, 576, 12, 576, 1268, - /* 1750 */ 457, 146, 146, 1267, 576, 1028, 1028, 1030, 1031, 35, - /* 1760 */ 140, 140, 1269, 167, 167, 1609, 160, 160, 1359, 150, - /* 1770 */ 150, 149, 149, 311, 1040, 576, 312, 147, 147, 313, - /* 1780 */ 119, 119, 222, 235, 576, 1188, 396, 576, 120, 576, - /* 1790 */ 451, 577, 451, 1192, 453, 1028, 508, 291, 148, 148, - /* 1800 */ 1421, 1612, 395, 395, 394, 276, 392, 85, 85, 859, - /* 1810 */ 87, 87, 84, 84, 553, 576, 294, 576, 1426, 338, - /* 1820 */ 339, 1425, 237, 300, 326, 1416, 1409, 1028, 1028, 1030, - /* 1830 */ 1031, 35, 325, 344, 403, 483, 226, 1307, 52, 52, - /* 1840 */ 58, 58, 368, 1371, 1502, 566, 1501, 121, 568, 221, - /* 1850 */ 4, 208, 268, 209, 390, 1244, 1549, 1188, 1372, 1370, - /* 1860 */ 1369, 1547, 239, 184, 571, 233, 421, 1241, 95, 218, - /* 1870 */ 173, 1507, 193, 43, 91, 94, 178, 186, 467, 188, - /* 1880 */ 468, 1422, 13, 189, 190, 191, 501, 451, 245, 108, - /* 1890 */ 238, 401, 1428, 1427, 1430, 475, 404, 1496, 197, 565, - /* 1900 */ 14, 490, 249, 101, 1518, 496, 349, 280, 251, 201, - /* 1910 */ 353, 499, 252, 406, 1270, 253, 517, 1327, 1326, 435, - /* 1920 */ 1325, 1318, 103, 893, 1296, 413, 227, 407, 1040, 1626, - /* 1930 */ 319, 567, 1625, 1297, 119, 119, 439, 367, 1317, 1295, - /* 1940 */ 1624, 526, 120, 440, 451, 577, 451, 1594, 309, 1028, - /* 
1950 */ 310, 373, 266, 267, 457, 1580, 1579, 443, 138, 1394, - /* 1960 */ 552, 1393, 11, 1483, 384, 115, 317, 1350, 109, 536, - /* 1970 */ 42, 579, 382, 214, 1349, 388, 1198, 389, 275, 277, - /* 1980 */ 278, 1028, 1028, 1030, 1031, 35, 580, 1265, 414, 1260, - /* 1990 */ 170, 415, 183, 1534, 1535, 1533, 171, 154, 307, 1532, - /* 2000 */ 846, 223, 224, 88, 452, 215, 172, 321, 234, 1102, - /* 2010 */ 152, 1188, 1100, 329, 185, 174, 1223, 925, 187, 241, - /* 2020 */ 337, 244, 1116, 192, 175, 176, 424, 426, 97, 194, - /* 2030 */ 98, 99, 100, 177, 1119, 1115, 246, 247, 161, 24, - /* 2040 */ 248, 348, 1238, 264, 1108, 250, 495, 199, 198, 15, - /* 2050 */ 861, 500, 369, 254, 504, 509, 512, 200, 102, 25, - /* 2060 */ 179, 361, 26, 364, 104, 891, 308, 162, 105, 904, - /* 2070 */ 520, 106, 1185, 1069, 1155, 17, 228, 27, 1154, 283, - /* 2080 */ 285, 263, 978, 202, 972, 123, 28, 1175, 29, 30, - /* 2090 */ 1179, 1171, 31, 1173, 1160, 41, 32, 206, 548, 33, - /* 2100 */ 110, 1178, 1083, 8, 112, 1070, 113, 1068, 1072, 34, - /* 2110 */ 1073, 560, 1125, 269, 1124, 270, 36, 18, 1194, 1033, - /* 2120 */ 873, 151, 122, 37, 393, 271, 272, 572, 181, 1193, - /* 2130 */ 1256, 1256, 1256, 935, 1256, 1256, 1256, 1256, 1256, 1256, - /* 2140 */ 1256, 1617, + /* 0 */ 130, 127, 234, 282, 282, 1328, 576, 1307, 460, 289, + /* 10 */ 289, 576, 1622, 381, 576, 1328, 573, 576, 562, 413, + /* 20 */ 1300, 1542, 573, 481, 562, 524, 460, 459, 558, 82, + /* 30 */ 82, 983, 294, 375, 51, 51, 498, 61, 61, 984, + /* 40 */ 82, 82, 1577, 137, 138, 91, 7, 1228, 1228, 1063, + /* 50 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 413, + /* 60 */ 288, 288, 182, 288, 288, 481, 536, 288, 288, 130, + /* 70 */ 127, 234, 432, 573, 525, 562, 573, 557, 562, 1290, + /* 80 */ 573, 421, 562, 137, 138, 91, 559, 1228, 1228, 1063, + /* 90 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 296, + /* 100 */ 460, 398, 1249, 134, 134, 134, 134, 133, 133, 132, + /* 110 */ 132, 132, 131, 128, 451, 451, 1050, 1050, 1064, 1067, + /* 120 */ 1255, 1, 1, 582, 2, 1259, 581, 1174, 1259, 1174, + /* 130 */ 321, 413, 155, 321, 1584, 155, 379, 112, 481, 1341, + /* 140 */ 456, 299, 1341, 134, 134, 134, 134, 133, 133, 132, + /* 150 */ 132, 132, 131, 128, 451, 137, 138, 91, 498, 1228, + /* 160 */ 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, + /* 170 */ 136, 1204, 862, 1281, 288, 288, 283, 288, 288, 523, + /* 180 */ 523, 1250, 139, 578, 7, 578, 1345, 573, 1169, 562, + /* 190 */ 573, 1054, 562, 136, 136, 136, 136, 129, 573, 547, + /* 200 */ 562, 1169, 245, 1541, 1169, 245, 133, 133, 132, 132, + /* 210 */ 132, 131, 128, 451, 302, 134, 134, 134, 134, 133, + /* 220 */ 133, 132, 132, 132, 131, 128, 451, 1575, 1204, 1205, + /* 230 */ 1204, 7, 470, 550, 455, 413, 550, 455, 130, 127, + /* 240 */ 234, 134, 134, 134, 134, 133, 133, 132, 132, 132, + /* 250 */ 131, 128, 451, 136, 136, 136, 136, 538, 483, 137, + /* 260 */ 138, 91, 1019, 1228, 1228, 1063, 1066, 1053, 1053, 135, + /* 270 */ 135, 136, 136, 136, 136, 1085, 576, 1204, 132, 132, + /* 280 */ 132, 131, 128, 451, 93, 214, 134, 134, 134, 134, + /* 290 */ 133, 133, 132, 132, 132, 131, 128, 451, 401, 19, + /* 300 */ 19, 134, 134, 134, 134, 133, 133, 132, 132, 132, + /* 310 */ 131, 128, 451, 1498, 426, 267, 344, 467, 332, 134, + /* 320 */ 134, 134, 134, 133, 133, 132, 132, 132, 131, 128, + /* 330 */ 451, 1281, 576, 6, 1204, 1205, 1204, 257, 576, 413, + /* 340 */ 511, 508, 507, 1279, 94, 1019, 464, 1204, 551, 551, + /* 350 */ 506, 1224, 1571, 44, 38, 51, 51, 411, 576, 413, + /* 360 */ 45, 51, 51, 137, 138, 91, 530, 1228, 
1228, 1063, + /* 370 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 398, + /* 380 */ 1148, 82, 82, 137, 138, 91, 39, 1228, 1228, 1063, + /* 390 */ 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, 344, + /* 400 */ 44, 288, 288, 375, 1204, 1205, 1204, 209, 1204, 1224, + /* 410 */ 320, 567, 471, 576, 573, 576, 562, 576, 316, 264, + /* 420 */ 231, 46, 160, 134, 134, 134, 134, 133, 133, 132, + /* 430 */ 132, 132, 131, 128, 451, 303, 82, 82, 82, 82, + /* 440 */ 82, 82, 442, 134, 134, 134, 134, 133, 133, 132, + /* 450 */ 132, 132, 131, 128, 451, 1582, 544, 320, 567, 1250, + /* 460 */ 874, 1582, 380, 382, 413, 1204, 1205, 1204, 360, 182, + /* 470 */ 288, 288, 1576, 557, 1339, 557, 7, 557, 1277, 472, + /* 480 */ 346, 526, 531, 573, 556, 562, 439, 1511, 137, 138, + /* 490 */ 91, 219, 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, + /* 500 */ 136, 136, 136, 136, 465, 1511, 1513, 532, 413, 288, + /* 510 */ 288, 423, 512, 288, 288, 411, 288, 288, 874, 130, + /* 520 */ 127, 234, 573, 1107, 562, 1204, 573, 1107, 562, 573, + /* 530 */ 560, 562, 137, 138, 91, 1293, 1228, 1228, 1063, 1066, + /* 540 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 134, 134, + /* 550 */ 134, 134, 133, 133, 132, 132, 132, 131, 128, 451, + /* 560 */ 493, 503, 1292, 1204, 257, 288, 288, 511, 508, 507, + /* 570 */ 1204, 1628, 1169, 123, 568, 275, 4, 506, 573, 1511, + /* 580 */ 562, 331, 1204, 1205, 1204, 1169, 548, 548, 1169, 261, + /* 590 */ 571, 7, 134, 134, 134, 134, 133, 133, 132, 132, + /* 600 */ 132, 131, 128, 451, 108, 533, 130, 127, 234, 1204, + /* 610 */ 448, 447, 413, 1451, 452, 983, 886, 96, 1598, 1233, + /* 620 */ 1204, 1205, 1204, 984, 1235, 1450, 565, 1204, 1205, 1204, + /* 630 */ 229, 522, 1234, 534, 1333, 1333, 137, 138, 91, 1449, + /* 640 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 650 */ 136, 136, 373, 1595, 971, 1040, 413, 1236, 418, 1236, + /* 660 */ 879, 121, 121, 948, 373, 1595, 1204, 1205, 1204, 122, + /* 670 */ 1204, 452, 577, 452, 363, 417, 1028, 882, 373, 1595, + /* 680 */ 137, 138, 91, 462, 1228, 1228, 1063, 1066, 1053, 1053, + /* 690 */ 135, 135, 136, 136, 136, 136, 134, 134, 134, 134, + /* 700 */ 133, 133, 132, 132, 132, 131, 128, 451, 1028, 1028, + /* 710 */ 1030, 1031, 35, 570, 570, 570, 197, 423, 1040, 198, + /* 720 */ 1204, 123, 568, 1204, 4, 320, 567, 1204, 1205, 1204, + /* 730 */ 40, 388, 576, 384, 882, 1029, 423, 1188, 571, 1028, + /* 740 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 750 */ 128, 451, 529, 1568, 1204, 19, 19, 1204, 575, 492, + /* 760 */ 413, 157, 452, 489, 1187, 1331, 1331, 5, 1204, 949, + /* 770 */ 431, 1028, 1028, 1030, 565, 22, 22, 1204, 1205, 1204, + /* 780 */ 1204, 1205, 1204, 477, 137, 138, 91, 212, 1228, 1228, + /* 790 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 800 */ 1188, 48, 111, 1040, 413, 1204, 213, 970, 1041, 121, + /* 810 */ 121, 1204, 1205, 1204, 1204, 1205, 1204, 122, 221, 452, + /* 820 */ 577, 452, 44, 487, 1028, 1204, 1205, 1204, 137, 138, + /* 830 */ 91, 378, 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, + /* 840 */ 136, 136, 136, 136, 134, 134, 134, 134, 133, 133, + /* 850 */ 132, 132, 132, 131, 128, 451, 1028, 1028, 1030, 1031, + /* 860 */ 35, 461, 1204, 1205, 1204, 1569, 1040, 377, 214, 1149, + /* 870 */ 1657, 535, 1657, 437, 902, 320, 567, 1568, 364, 320, + /* 880 */ 567, 412, 329, 1029, 519, 1188, 3, 1028, 134, 134, + /* 890 */ 134, 134, 133, 133, 132, 132, 132, 131, 128, 451, + /* 900 */ 1659, 399, 1169, 307, 893, 307, 515, 576, 413, 214, + /* 910 */ 498, 944, 1024, 540, 903, 1169, 943, 392, 1169, 1028, + 
/* 920 */ 1028, 1030, 406, 298, 1204, 50, 1149, 1658, 413, 1658, + /* 930 */ 145, 145, 137, 138, 91, 293, 1228, 1228, 1063, 1066, + /* 940 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 1188, 1147, + /* 950 */ 514, 1568, 137, 138, 91, 1505, 1228, 1228, 1063, 1066, + /* 960 */ 1053, 1053, 135, 135, 136, 136, 136, 136, 434, 323, + /* 970 */ 435, 539, 111, 1506, 274, 291, 372, 517, 367, 516, + /* 980 */ 262, 1204, 1205, 1204, 1574, 481, 363, 576, 7, 1569, + /* 990 */ 1568, 377, 134, 134, 134, 134, 133, 133, 132, 132, + /* 1000 */ 132, 131, 128, 451, 1568, 576, 1147, 576, 232, 576, + /* 1010 */ 19, 19, 134, 134, 134, 134, 133, 133, 132, 132, + /* 1020 */ 132, 131, 128, 451, 1169, 433, 576, 1207, 19, 19, + /* 1030 */ 19, 19, 19, 19, 1627, 576, 911, 1169, 47, 120, + /* 1040 */ 1169, 117, 413, 306, 498, 438, 1125, 206, 336, 19, + /* 1050 */ 19, 1435, 49, 449, 449, 449, 1368, 315, 81, 81, + /* 1060 */ 576, 304, 413, 1570, 207, 377, 137, 138, 91, 115, + /* 1070 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 1080 */ 136, 136, 576, 82, 82, 1207, 137, 138, 91, 1340, + /* 1090 */ 1228, 1228, 1063, 1066, 1053, 1053, 135, 135, 136, 136, + /* 1100 */ 136, 136, 1569, 386, 377, 82, 82, 463, 1126, 1552, + /* 1110 */ 333, 463, 335, 131, 128, 451, 1569, 161, 377, 16, + /* 1120 */ 317, 387, 428, 1127, 448, 447, 134, 134, 134, 134, + /* 1130 */ 133, 133, 132, 132, 132, 131, 128, 451, 1128, 576, + /* 1140 */ 1105, 10, 445, 267, 576, 1554, 134, 134, 134, 134, + /* 1150 */ 133, 133, 132, 132, 132, 131, 128, 451, 532, 576, + /* 1160 */ 922, 576, 19, 19, 576, 1573, 576, 147, 147, 7, + /* 1170 */ 923, 1236, 498, 1236, 576, 487, 413, 552, 285, 1224, + /* 1180 */ 969, 215, 82, 82, 66, 66, 1435, 67, 67, 21, + /* 1190 */ 21, 1110, 1110, 495, 334, 297, 413, 53, 53, 297, + /* 1200 */ 137, 138, 91, 119, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1210 */ 135, 135, 136, 136, 136, 136, 413, 1336, 1311, 446, + /* 1220 */ 137, 138, 91, 227, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1230 */ 135, 135, 136, 136, 136, 136, 574, 1224, 936, 936, + /* 1240 */ 137, 126, 91, 141, 1228, 1228, 1063, 1066, 1053, 1053, + /* 1250 */ 135, 135, 136, 136, 136, 136, 533, 429, 472, 346, + /* 1260 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1270 */ 128, 451, 576, 457, 233, 343, 1435, 403, 498, 1550, + /* 1280 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1290 */ 128, 451, 576, 324, 576, 82, 82, 487, 576, 969, + /* 1300 */ 134, 134, 134, 134, 133, 133, 132, 132, 132, 131, + /* 1310 */ 128, 451, 288, 288, 546, 68, 68, 54, 54, 553, + /* 1320 */ 413, 69, 69, 351, 6, 573, 944, 562, 410, 409, + /* 1330 */ 1435, 943, 450, 545, 260, 259, 258, 576, 158, 576, + /* 1340 */ 413, 222, 1180, 479, 969, 138, 91, 430, 1228, 1228, + /* 1350 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 1360 */ 70, 70, 71, 71, 576, 1126, 91, 576, 1228, 1228, + /* 1370 */ 1063, 1066, 1053, 1053, 135, 135, 136, 136, 136, 136, + /* 1380 */ 1127, 166, 850, 851, 852, 1282, 419, 72, 72, 108, + /* 1390 */ 73, 73, 1310, 358, 1180, 1128, 576, 305, 576, 123, + /* 1400 */ 568, 494, 4, 488, 134, 134, 134, 134, 133, 133, + /* 1410 */ 132, 132, 132, 131, 128, 451, 571, 564, 534, 55, + /* 1420 */ 55, 56, 56, 576, 134, 134, 134, 134, 133, 133, + /* 1430 */ 132, 132, 132, 131, 128, 451, 576, 1104, 233, 1104, + /* 1440 */ 452, 1602, 582, 2, 1259, 576, 57, 57, 576, 321, + /* 1450 */ 576, 155, 565, 1435, 485, 353, 576, 356, 1341, 59, + /* 1460 */ 59, 576, 44, 969, 569, 419, 576, 238, 60, 60, + /* 1470 */ 261, 74, 74, 75, 75, 287, 231, 576, 1366, 76, + 
/* 1480 */ 76, 1040, 420, 184, 20, 20, 576, 121, 121, 77, + /* 1490 */ 77, 97, 218, 288, 288, 122, 125, 452, 577, 452, + /* 1500 */ 143, 143, 1028, 576, 520, 576, 573, 576, 562, 144, + /* 1510 */ 144, 474, 227, 1244, 478, 123, 568, 576, 4, 320, + /* 1520 */ 567, 245, 411, 576, 443, 411, 78, 78, 62, 62, + /* 1530 */ 79, 79, 571, 319, 1028, 1028, 1030, 1031, 35, 418, + /* 1540 */ 63, 63, 576, 290, 411, 9, 80, 80, 1144, 576, + /* 1550 */ 400, 576, 486, 455, 576, 1223, 452, 576, 325, 342, + /* 1560 */ 576, 111, 576, 1188, 242, 64, 64, 473, 565, 576, + /* 1570 */ 23, 576, 170, 170, 171, 171, 576, 87, 87, 328, + /* 1580 */ 65, 65, 542, 83, 83, 146, 146, 541, 123, 568, + /* 1590 */ 341, 4, 84, 84, 168, 168, 576, 1040, 576, 148, + /* 1600 */ 148, 576, 1380, 121, 121, 571, 1021, 576, 266, 576, + /* 1610 */ 424, 122, 576, 452, 577, 452, 576, 553, 1028, 142, + /* 1620 */ 142, 169, 169, 576, 162, 162, 528, 889, 371, 452, + /* 1630 */ 152, 152, 151, 151, 1379, 149, 149, 109, 370, 150, + /* 1640 */ 150, 565, 576, 480, 576, 266, 86, 86, 576, 1092, + /* 1650 */ 1028, 1028, 1030, 1031, 35, 542, 482, 576, 266, 466, + /* 1660 */ 543, 123, 568, 1616, 4, 88, 88, 85, 85, 475, + /* 1670 */ 1040, 52, 52, 222, 901, 900, 121, 121, 571, 1188, + /* 1680 */ 58, 58, 244, 1032, 122, 889, 452, 577, 452, 908, + /* 1690 */ 909, 1028, 300, 347, 504, 111, 263, 361, 165, 111, + /* 1700 */ 111, 1088, 452, 263, 974, 1153, 266, 1092, 986, 987, + /* 1710 */ 942, 939, 125, 125, 565, 1103, 872, 1103, 159, 941, + /* 1720 */ 1309, 125, 1557, 1028, 1028, 1030, 1031, 35, 542, 337, + /* 1730 */ 1530, 205, 1529, 541, 499, 1589, 490, 348, 1376, 352, + /* 1740 */ 355, 1032, 357, 1040, 359, 1324, 1308, 366, 563, 121, + /* 1750 */ 121, 376, 1188, 1389, 1434, 1362, 280, 122, 1374, 452, + /* 1760 */ 577, 452, 167, 1439, 1028, 1289, 1280, 1268, 1267, 1269, + /* 1770 */ 1609, 1359, 312, 313, 314, 397, 12, 237, 224, 1421, + /* 1780 */ 295, 1416, 1409, 1426, 339, 484, 340, 509, 1371, 1612, + /* 1790 */ 1372, 1425, 1244, 404, 301, 228, 1028, 1028, 1030, 1031, + /* 1800 */ 35, 1601, 1192, 454, 345, 1307, 292, 369, 1502, 1501, + /* 1810 */ 270, 396, 396, 395, 277, 393, 1370, 1369, 859, 1549, + /* 1820 */ 186, 123, 568, 235, 4, 1188, 391, 210, 211, 223, + /* 1830 */ 1547, 239, 1241, 327, 422, 96, 220, 195, 571, 180, + /* 1840 */ 188, 326, 468, 469, 190, 191, 502, 192, 193, 566, + /* 1850 */ 247, 109, 1430, 491, 199, 251, 102, 281, 402, 476, + /* 1860 */ 405, 1496, 452, 497, 253, 1422, 13, 1428, 14, 1427, + /* 1870 */ 203, 1507, 241, 500, 565, 354, 407, 92, 95, 1270, + /* 1880 */ 175, 254, 518, 43, 1327, 255, 1326, 1325, 436, 1518, + /* 1890 */ 350, 1318, 104, 229, 893, 1626, 440, 441, 1625, 408, + /* 1900 */ 240, 1296, 268, 1040, 310, 269, 1297, 527, 444, 121, + /* 1910 */ 121, 368, 1295, 1594, 1624, 311, 1394, 122, 1317, 452, + /* 1920 */ 577, 452, 374, 1580, 1028, 1393, 140, 553, 11, 90, + /* 1930 */ 568, 385, 4, 116, 318, 414, 1579, 110, 1483, 537, + /* 1940 */ 320, 567, 1350, 555, 42, 579, 571, 1349, 1198, 383, + /* 1950 */ 276, 390, 216, 389, 278, 279, 1028, 1028, 1030, 1031, + /* 1960 */ 35, 172, 580, 1265, 458, 1260, 415, 416, 185, 156, + /* 1970 */ 452, 1534, 1535, 173, 1533, 1532, 89, 308, 225, 226, + /* 1980 */ 846, 174, 565, 453, 217, 1188, 322, 236, 1102, 154, + /* 1990 */ 1100, 330, 187, 176, 1223, 243, 189, 925, 338, 246, + /* 2000 */ 1116, 194, 177, 425, 178, 427, 98, 196, 99, 100, + /* 2010 */ 101, 1040, 179, 1119, 1115, 248, 249, 121, 121, 163, + /* 2020 */ 24, 250, 349, 1238, 496, 122, 1108, 452, 577, 452, + /* 2030 */ 1192, 454, 
1028, 266, 292, 200, 252, 201, 861, 396, + /* 2040 */ 396, 395, 277, 393, 15, 501, 859, 370, 292, 256, + /* 2050 */ 202, 554, 505, 396, 396, 395, 277, 393, 103, 239, + /* 2060 */ 859, 327, 25, 26, 1028, 1028, 1030, 1031, 35, 326, + /* 2070 */ 362, 510, 891, 239, 365, 327, 513, 904, 105, 309, + /* 2080 */ 164, 181, 27, 326, 106, 521, 107, 1185, 1069, 1155, + /* 2090 */ 17, 1154, 230, 1188, 284, 286, 265, 204, 125, 1171, + /* 2100 */ 241, 28, 978, 972, 29, 41, 1175, 1179, 175, 1173, + /* 2110 */ 30, 43, 31, 8, 241, 1178, 32, 1160, 208, 549, + /* 2120 */ 33, 111, 175, 1083, 1070, 43, 1068, 1072, 240, 113, + /* 2130 */ 114, 34, 561, 118, 1124, 271, 1073, 36, 18, 572, + /* 2140 */ 1033, 873, 240, 124, 37, 935, 272, 273, 1617, 183, + /* 2150 */ 153, 394, 1194, 1193, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2160 */ 1256, 1256, 1256, 414, 1256, 1256, 1256, 1256, 320, 567, + /* 2170 */ 1256, 1256, 1256, 1256, 1256, 1256, 1256, 414, 1256, 1256, + /* 2180 */ 1256, 1256, 320, 567, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2190 */ 1256, 1256, 458, 1256, 1256, 1256, 1256, 1256, 1256, 1256, + /* 2200 */ 1256, 1256, 1256, 1256, 1256, 1256, 458, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 194, 276, 277, 278, 216, 194, 194, 217, 194, 194, - /* 10 */ 194, 194, 224, 194, 194, 276, 277, 278, 204, 19, - /* 20 */ 206, 202, 297, 217, 218, 205, 207, 217, 205, 217, - /* 30 */ 218, 31, 217, 218, 217, 218, 29, 217, 218, 39, - /* 40 */ 33, 217, 220, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 312, 19, - /* 60 */ 240, 241, 316, 240, 241, 194, 46, 47, 48, 49, - /* 70 */ 22, 254, 65, 253, 254, 255, 253, 194, 255, 194, - /* 80 */ 263, 258, 259, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 276, 277, - /* 100 */ 278, 285, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 112, 113, 59, 186, 187, 188, 189, 190, - /* 120 */ 191, 310, 239, 317, 318, 196, 86, 198, 88, 317, - /* 130 */ 19, 319, 317, 318, 205, 264, 25, 211, 212, 213, - /* 140 */ 205, 121, 102, 103, 104, 105, 106, 107, 108, 109, - /* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48, - /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 240, - /* 170 */ 241, 116, 117, 118, 119, 240, 241, 122, 123, 124, - /* 180 */ 69, 298, 253, 194, 255, 106, 107, 132, 253, 141, - /* 190 */ 255, 54, 55, 56, 57, 58, 207, 268, 102, 103, - /* 200 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 210 */ 214, 128, 129, 102, 103, 104, 105, 106, 107, 108, - /* 220 */ 109, 110, 111, 112, 113, 134, 25, 136, 137, 300, - /* 230 */ 165, 166, 153, 19, 155, 54, 55, 56, 57, 102, - /* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 250 */ 113, 108, 109, 110, 111, 112, 113, 43, 44, 45, - /* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 270 */ 56, 57, 276, 277, 278, 113, 194, 19, 22, 23, - /* 280 */ 194, 67, 24, 102, 103, 104, 105, 106, 107, 108, - /* 290 */ 109, 110, 111, 112, 113, 220, 250, 59, 252, 217, - /* 300 */ 218, 43, 44, 45, 46, 47, 48, 49, 50, 51, - /* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105, - /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 106, 107, - /* 330 */ 108, 109, 110, 111, 112, 113, 254, 59, 205, 138, - /* 340 */ 139, 19, 20, 194, 22, 263, 22, 23, 231, 25, - /* 350 */ 72, 276, 277, 278, 116, 117, 118, 101, 36, 76, - /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 112, 113, 89, 240, 241, 92, 73, 194, 194, 73, - /* 380 */ 19, 59, 188, 189, 190, 191, 253, 81, 255, 151, - /* 390 */ 196, 25, 198, 71, 116, 117, 118, 311, 312, 205, - /* 400 */ 217, 218, 
316, 81, 43, 44, 45, 46, 47, 48, - /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 270, - /* 420 */ 22, 23, 100, 25, 59, 101, 138, 139, 106, 107, - /* 430 */ 127, 128, 129, 127, 240, 241, 114, 254, 116, 117, - /* 440 */ 118, 76, 76, 121, 138, 139, 263, 253, 264, 255, - /* 450 */ 205, 275, 87, 19, 89, 89, 194, 92, 92, 199, - /* 460 */ 138, 139, 268, 102, 103, 104, 105, 106, 107, 108, - /* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157, - /* 480 */ 81, 116, 117, 118, 129, 240, 241, 224, 19, 226, - /* 490 */ 314, 315, 23, 25, 300, 59, 22, 234, 253, 101, - /* 500 */ 255, 236, 237, 26, 194, 183, 194, 152, 72, 22, - /* 510 */ 145, 150, 43, 44, 45, 46, 47, 48, 49, 50, - /* 520 */ 51, 52, 53, 54, 55, 56, 57, 217, 218, 217, - /* 530 */ 218, 19, 189, 59, 191, 23, 59, 138, 139, 196, - /* 540 */ 135, 198, 232, 283, 232, 140, 59, 287, 205, 275, - /* 550 */ 116, 205, 116, 117, 118, 43, 44, 45, 46, 47, - /* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 570 */ 194, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 580 */ 111, 112, 113, 240, 241, 194, 240, 241, 314, 315, - /* 590 */ 116, 117, 118, 116, 117, 118, 253, 194, 255, 253, - /* 600 */ 59, 255, 19, 116, 117, 118, 23, 22, 217, 218, - /* 610 */ 142, 268, 205, 275, 102, 103, 104, 105, 106, 107, - /* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 640 */ 57, 19, 194, 300, 59, 23, 119, 240, 241, 122, - /* 650 */ 123, 124, 314, 315, 194, 236, 237, 194, 117, 132, - /* 660 */ 253, 81, 255, 205, 59, 43, 44, 45, 46, 47, - /* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 680 */ 217, 218, 194, 194, 194, 102, 103, 104, 105, 106, - /* 690 */ 107, 108, 109, 110, 111, 112, 113, 294, 240, 241, - /* 700 */ 120, 116, 117, 118, 59, 194, 217, 218, 211, 212, - /* 710 */ 213, 253, 19, 255, 194, 19, 23, 254, 138, 139, - /* 720 */ 24, 232, 117, 194, 102, 103, 104, 105, 106, 107, - /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 750 */ 57, 19, 264, 108, 76, 23, 127, 128, 129, 311, - /* 760 */ 312, 116, 117, 118, 316, 87, 306, 89, 308, 194, - /* 770 */ 92, 22, 59, 194, 22, 43, 44, 45, 46, 47, - /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 790 */ 194, 95, 217, 218, 265, 102, 103, 104, 105, 106, - /* 800 */ 107, 108, 109, 110, 111, 112, 113, 232, 59, 113, - /* 810 */ 25, 59, 194, 217, 218, 119, 120, 121, 122, 123, - /* 820 */ 124, 125, 19, 145, 194, 194, 23, 131, 232, 116, - /* 830 */ 117, 118, 35, 194, 102, 103, 104, 105, 106, 107, - /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 860 */ 57, 19, 194, 66, 194, 116, 117, 118, 116, 117, - /* 870 */ 118, 74, 242, 294, 194, 194, 206, 23, 194, 25, - /* 880 */ 194, 111, 112, 113, 25, 43, 44, 45, 46, 47, - /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 900 */ 24, 194, 194, 217, 218, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 241, 232, 194, - /* 920 */ 212, 213, 242, 242, 217, 218, 242, 130, 11, 253, - /* 930 */ 194, 255, 19, 265, 149, 59, 306, 194, 308, 232, - /* 940 */ 309, 310, 217, 218, 102, 103, 104, 105, 106, 107, - /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, - /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 970 */ 57, 194, 194, 59, 194, 239, 19, 194, 25, 254, - /* 980 */ 303, 304, 23, 194, 25, 126, 306, 306, 308, 308, - /* 990 */ 306, 271, 308, 117, 286, 217, 218, 217, 218, 194, - /* 1000 */ 194, 159, 45, 46, 47, 48, 49, 50, 51, 52, - /* 1010 */ 53, 54, 
55, 56, 57, 102, 103, 104, 105, 106, - /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 59, 239, 194, - /* 1030 */ 116, 117, 118, 260, 254, 194, 240, 241, 194, 233, - /* 1040 */ 205, 240, 241, 205, 239, 128, 129, 270, 265, 253, - /* 1050 */ 194, 255, 217, 218, 253, 194, 255, 143, 280, 102, - /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 1070 */ 113, 118, 159, 217, 218, 240, 241, 118, 240, 241, - /* 1080 */ 194, 194, 194, 239, 116, 117, 118, 22, 253, 254, - /* 1090 */ 255, 253, 19, 255, 233, 194, 143, 24, 263, 212, - /* 1100 */ 213, 194, 143, 217, 218, 217, 218, 261, 262, 271, - /* 1110 */ 254, 143, 19, 7, 8, 9, 43, 44, 45, 46, - /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 16, 19, 22, 23, 294, 43, 44, 45, 46, - /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 312, 194, 214, 21, 316, 43, 44, 45, 46, - /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 106, 107, 286, 194, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 207, 158, 59, - /* 1190 */ 160, 22, 77, 24, 79, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 194, 194, 229, - /* 1210 */ 194, 231, 101, 80, 22, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 288, 59, 12, - /* 1230 */ 217, 218, 293, 217, 218, 19, 106, 107, 59, 19, - /* 1240 */ 16, 127, 128, 129, 27, 115, 116, 117, 118, 194, - /* 1250 */ 120, 59, 22, 194, 24, 194, 123, 100, 128, 42, - /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 117, 194, 217, 218, 121, 100, - /* 1280 */ 63, 194, 245, 153, 194, 155, 117, 19, 115, 194, - /* 1290 */ 73, 214, 194, 256, 161, 116, 117, 194, 217, 218, - /* 1300 */ 121, 77, 194, 79, 217, 218, 194, 217, 218, 117, - /* 1310 */ 153, 154, 155, 254, 46, 217, 218, 144, 102, 103, - /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 232, 270, 153, 154, 155, 115, 116, 66, 19, 20, - /* 1340 */ 183, 22, 12, 312, 254, 194, 262, 316, 209, 210, - /* 1350 */ 266, 239, 194, 194, 108, 36, 85, 27, 19, 20, - /* 1360 */ 265, 22, 183, 245, 144, 94, 25, 48, 217, 218, - /* 1370 */ 293, 194, 42, 270, 256, 36, 217, 218, 59, 194, - /* 1380 */ 25, 135, 194, 115, 194, 161, 140, 194, 194, 15, - /* 1390 */ 71, 194, 312, 63, 217, 218, 316, 194, 59, 131, - /* 1400 */ 301, 302, 217, 218, 85, 217, 218, 217, 218, 90, - /* 1410 */ 71, 217, 218, 19, 217, 218, 245, 146, 262, 100, - /* 1420 */ 217, 218, 266, 265, 85, 106, 107, 256, 312, 90, - /* 1430 */ 209, 210, 316, 114, 60, 116, 117, 118, 194, 100, - /* 1440 */ 121, 194, 194, 145, 115, 106, 107, 19, 46, 19, - /* 1450 */ 20, 24, 22, 114, 194, 116, 117, 118, 194, 245, - /* 1460 */ 121, 194, 164, 194, 217, 218, 36, 194, 258, 259, - /* 1470 */ 256, 194, 153, 154, 155, 156, 157, 217, 218, 150, - /* 1480 */ 31, 217, 218, 142, 217, 218, 217, 218, 39, 59, - /* 1490 */ 217, 218, 153, 154, 155, 156, 157, 149, 150, 5, - /* 1500 */ 145, 71, 183, 245, 10, 11, 12, 13, 14, 194, - /* 1510 */ 116, 17, 129, 227, 256, 85, 194, 115, 194, 23, - /* 1520 */ 90, 25, 183, 99, 30, 97, 32, 22, 22, 194, - /* 1530 */ 100, 194, 217, 218, 40, 152, 106, 107, 23, 217, - /* 1540 */ 218, 194, 19, 20, 114, 22, 116, 117, 118, 257, - /* 1550 */ 194, 121, 217, 218, 217, 218, 194, 133, 53, 36, - /* 1560 */ 23, 23, 25, 25, 70, 120, 121, 61, 141, 7, - /* 1570 */ 8, 121, 78, 217, 218, 81, 23, 227, 25, 217, - /* 1580 */ 218, 131, 59, 153, 154, 155, 156, 157, 0, 1, - /* 1590 */ 2, 59, 98, 5, 71, 23, 227, 25, 10, 11, - /* 1600 */ 12, 13, 14, 83, 84, 17, 23, 23, 25, 25, - /* 
1610 */ 59, 194, 194, 183, 23, 23, 25, 25, 30, 194, - /* 1620 */ 32, 19, 20, 100, 22, 194, 194, 133, 40, 106, - /* 1630 */ 107, 108, 138, 139, 194, 217, 218, 114, 36, 116, - /* 1640 */ 117, 118, 217, 218, 121, 194, 194, 194, 23, 117, - /* 1650 */ 25, 194, 23, 23, 25, 25, 162, 194, 70, 194, - /* 1660 */ 145, 59, 23, 153, 25, 155, 78, 194, 117, 81, - /* 1670 */ 217, 218, 194, 71, 217, 218, 153, 154, 155, 156, - /* 1680 */ 157, 194, 217, 218, 194, 23, 98, 25, 321, 194, - /* 1690 */ 217, 218, 194, 19, 20, 194, 22, 153, 23, 155, - /* 1700 */ 25, 194, 100, 194, 217, 218, 183, 194, 106, 107, - /* 1710 */ 36, 194, 217, 218, 237, 194, 114, 243, 116, 117, - /* 1720 */ 118, 133, 194, 121, 217, 218, 138, 139, 194, 194, - /* 1730 */ 194, 290, 289, 59, 217, 218, 194, 194, 217, 218, - /* 1740 */ 194, 194, 140, 194, 194, 71, 194, 244, 194, 194, - /* 1750 */ 162, 217, 218, 194, 194, 153, 154, 155, 156, 157, - /* 1760 */ 217, 218, 194, 217, 218, 194, 217, 218, 257, 217, - /* 1770 */ 218, 217, 218, 257, 100, 194, 257, 217, 218, 257, - /* 1780 */ 106, 107, 215, 299, 194, 183, 192, 194, 114, 194, - /* 1790 */ 116, 117, 118, 1, 2, 121, 221, 5, 217, 218, - /* 1800 */ 273, 197, 10, 11, 12, 13, 14, 217, 218, 17, - /* 1810 */ 217, 218, 217, 218, 140, 194, 246, 194, 273, 295, - /* 1820 */ 247, 273, 30, 247, 32, 269, 269, 153, 154, 155, - /* 1830 */ 156, 157, 40, 246, 273, 295, 230, 226, 217, 218, - /* 1840 */ 217, 218, 220, 261, 220, 282, 220, 19, 20, 244, - /* 1850 */ 22, 250, 141, 250, 246, 60, 201, 183, 261, 261, - /* 1860 */ 261, 201, 70, 299, 36, 299, 201, 38, 151, 150, - /* 1870 */ 78, 285, 22, 81, 296, 296, 43, 235, 18, 238, - /* 1880 */ 201, 274, 272, 238, 238, 238, 18, 59, 200, 149, - /* 1890 */ 98, 247, 274, 274, 235, 247, 247, 247, 235, 71, - /* 1900 */ 272, 201, 200, 158, 292, 62, 291, 201, 200, 22, - /* 1910 */ 201, 222, 200, 222, 201, 200, 115, 219, 219, 64, - /* 1920 */ 219, 228, 22, 126, 221, 133, 165, 222, 100, 225, - /* 1930 */ 138, 139, 225, 219, 106, 107, 24, 219, 228, 219, - /* 1940 */ 219, 307, 114, 113, 116, 117, 118, 315, 284, 121, - /* 1950 */ 284, 222, 201, 91, 162, 320, 320, 82, 148, 267, - /* 1960 */ 145, 267, 22, 279, 201, 158, 281, 251, 147, 146, - /* 1970 */ 25, 203, 250, 249, 251, 248, 13, 247, 195, 195, - /* 1980 */ 6, 153, 154, 155, 156, 157, 193, 193, 305, 193, - /* 1990 */ 208, 305, 302, 214, 214, 214, 208, 223, 223, 214, - /* 2000 */ 4, 215, 215, 214, 3, 22, 208, 163, 15, 23, - /* 2010 */ 16, 183, 23, 139, 151, 130, 25, 20, 142, 24, - /* 2020 */ 16, 144, 1, 142, 130, 130, 61, 37, 53, 151, - /* 2030 */ 53, 53, 53, 130, 116, 1, 34, 141, 5, 22, - /* 2040 */ 115, 161, 75, 25, 68, 141, 41, 115, 68, 24, - /* 2050 */ 20, 19, 131, 125, 67, 67, 96, 22, 22, 22, - /* 2060 */ 37, 23, 22, 24, 22, 59, 67, 23, 149, 28, - /* 2070 */ 22, 25, 23, 23, 23, 22, 141, 34, 97, 23, - /* 2080 */ 23, 34, 116, 22, 143, 25, 34, 75, 34, 34, - /* 2090 */ 75, 88, 34, 86, 23, 22, 34, 25, 24, 34, - /* 2100 */ 25, 93, 23, 44, 142, 23, 142, 23, 23, 22, - /* 2110 */ 11, 25, 23, 25, 23, 22, 22, 22, 1, 23, - /* 2120 */ 23, 23, 22, 22, 15, 141, 141, 25, 25, 1, - /* 2130 */ 322, 322, 322, 135, 322, 322, 322, 322, 322, 322, - /* 2140 */ 322, 141, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2150 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2160 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2170 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2180 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2190 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2200 */ 322, 322, 
322, 322, 322, 322, 322, 322, 322, 322, - /* 2210 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2220 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2230 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2240 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2250 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2260 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2270 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2280 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2290 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2300 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2310 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, - /* 2320 */ 322, 322, 322, 322, 322, 322, 322, 322, + /* 0 */ 277, 278, 279, 241, 242, 225, 195, 227, 195, 241, + /* 10 */ 242, 195, 217, 221, 195, 235, 254, 195, 256, 19, + /* 20 */ 225, 298, 254, 195, 256, 206, 213, 214, 206, 218, + /* 30 */ 219, 31, 206, 195, 218, 219, 195, 218, 219, 39, + /* 40 */ 218, 219, 313, 43, 44, 45, 317, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 19, + /* 60 */ 241, 242, 195, 241, 242, 195, 255, 241, 242, 277, + /* 70 */ 278, 279, 234, 254, 255, 256, 254, 255, 256, 218, + /* 80 */ 254, 240, 256, 43, 44, 45, 264, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 271, + /* 100 */ 287, 22, 23, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 113, 114, 114, 47, 48, 49, 50, + /* 120 */ 187, 188, 189, 190, 191, 192, 190, 87, 192, 89, + /* 130 */ 197, 19, 199, 197, 318, 199, 320, 25, 195, 206, + /* 140 */ 299, 271, 206, 103, 104, 105, 106, 107, 108, 109, + /* 150 */ 110, 111, 112, 113, 114, 43, 44, 45, 195, 47, + /* 160 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 170 */ 58, 60, 21, 195, 241, 242, 215, 241, 242, 312, + /* 180 */ 313, 102, 70, 205, 317, 207, 242, 254, 77, 256, + /* 190 */ 254, 122, 256, 55, 56, 57, 58, 59, 254, 88, + /* 200 */ 256, 90, 269, 240, 93, 269, 107, 108, 109, 110, + /* 210 */ 111, 112, 113, 114, 271, 103, 104, 105, 106, 107, + /* 220 */ 108, 109, 110, 111, 112, 113, 114, 313, 117, 118, + /* 230 */ 119, 317, 81, 195, 301, 19, 195, 301, 277, 278, + /* 240 */ 279, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 250 */ 112, 113, 114, 55, 56, 57, 58, 146, 195, 43, + /* 260 */ 44, 45, 74, 47, 48, 49, 50, 51, 52, 53, + /* 270 */ 54, 55, 56, 57, 58, 124, 195, 60, 109, 110, + /* 280 */ 111, 112, 113, 114, 68, 195, 103, 104, 105, 106, + /* 290 */ 107, 108, 109, 110, 111, 112, 113, 114, 208, 218, + /* 300 */ 219, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 310 */ 112, 113, 114, 162, 233, 24, 128, 129, 130, 103, + /* 320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 330 */ 114, 195, 195, 215, 117, 118, 119, 120, 195, 19, + /* 340 */ 123, 124, 125, 207, 24, 74, 246, 60, 310, 311, + /* 350 */ 133, 60, 311, 82, 22, 218, 219, 257, 195, 19, + /* 360 */ 73, 218, 219, 43, 44, 45, 206, 47, 48, 49, + /* 370 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 22, + /* 380 */ 23, 218, 219, 43, 44, 45, 54, 47, 48, 49, + /* 390 */ 50, 51, 52, 53, 54, 55, 56, 57, 58, 128, + /* 400 */ 82, 241, 242, 195, 117, 118, 119, 289, 60, 118, + /* 410 */ 139, 140, 294, 195, 254, 195, 256, 195, 255, 259, + /* 420 */ 260, 73, 22, 103, 104, 105, 106, 107, 108, 109, + /* 430 */ 110, 111, 112, 113, 114, 206, 218, 219, 218, 219, + /* 440 */ 218, 219, 234, 103, 104, 105, 106, 107, 108, 109, + /* 450 */ 110, 111, 112, 113, 114, 318, 319, 139, 140, 102, + /* 460 */ 60, 318, 319, 221, 19, 117, 118, 119, 23, 195, + /* 470 
*/ 241, 242, 313, 255, 206, 255, 317, 255, 206, 129, + /* 480 */ 130, 206, 264, 254, 264, 256, 264, 195, 43, 44, + /* 490 */ 45, 151, 47, 48, 49, 50, 51, 52, 53, 54, + /* 500 */ 55, 56, 57, 58, 246, 213, 214, 19, 19, 241, + /* 510 */ 242, 195, 23, 241, 242, 257, 241, 242, 118, 277, + /* 520 */ 278, 279, 254, 29, 256, 60, 254, 33, 256, 254, + /* 530 */ 206, 256, 43, 44, 45, 218, 47, 48, 49, 50, + /* 540 */ 51, 52, 53, 54, 55, 56, 57, 58, 103, 104, + /* 550 */ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + /* 560 */ 66, 19, 218, 60, 120, 241, 242, 123, 124, 125, + /* 570 */ 60, 232, 77, 19, 20, 26, 22, 133, 254, 287, + /* 580 */ 256, 265, 117, 118, 119, 90, 312, 313, 93, 47, + /* 590 */ 36, 317, 103, 104, 105, 106, 107, 108, 109, 110, + /* 600 */ 111, 112, 113, 114, 116, 117, 277, 278, 279, 60, + /* 610 */ 107, 108, 19, 276, 60, 31, 23, 152, 195, 116, + /* 620 */ 117, 118, 119, 39, 121, 276, 72, 117, 118, 119, + /* 630 */ 166, 167, 129, 145, 237, 238, 43, 44, 45, 276, + /* 640 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 650 */ 57, 58, 315, 316, 144, 101, 19, 154, 116, 156, + /* 660 */ 23, 107, 108, 109, 315, 316, 117, 118, 119, 115, + /* 670 */ 60, 117, 118, 119, 132, 200, 122, 60, 315, 316, + /* 680 */ 43, 44, 45, 272, 47, 48, 49, 50, 51, 52, + /* 690 */ 53, 54, 55, 56, 57, 58, 103, 104, 105, 106, + /* 700 */ 107, 108, 109, 110, 111, 112, 113, 114, 154, 155, + /* 710 */ 156, 157, 158, 212, 213, 214, 22, 195, 101, 22, + /* 720 */ 60, 19, 20, 60, 22, 139, 140, 117, 118, 119, + /* 730 */ 22, 251, 195, 253, 117, 118, 195, 183, 36, 122, + /* 740 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 750 */ 113, 114, 195, 195, 60, 218, 219, 60, 195, 284, + /* 760 */ 19, 25, 60, 288, 23, 237, 238, 22, 60, 109, + /* 770 */ 233, 154, 155, 156, 72, 218, 219, 117, 118, 119, + /* 780 */ 117, 118, 119, 116, 43, 44, 45, 265, 47, 48, + /* 790 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 800 */ 183, 243, 25, 101, 19, 60, 265, 144, 23, 107, + /* 810 */ 108, 117, 118, 119, 117, 118, 119, 115, 151, 117, + /* 820 */ 118, 119, 82, 195, 122, 117, 118, 119, 43, 44, + /* 830 */ 45, 195, 47, 48, 49, 50, 51, 52, 53, 54, + /* 840 */ 55, 56, 57, 58, 103, 104, 105, 106, 107, 108, + /* 850 */ 109, 110, 111, 112, 113, 114, 154, 155, 156, 157, + /* 860 */ 158, 121, 117, 118, 119, 307, 101, 309, 195, 22, + /* 870 */ 23, 195, 25, 19, 35, 139, 140, 195, 24, 139, + /* 880 */ 140, 208, 195, 118, 109, 183, 22, 122, 103, 104, + /* 890 */ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + /* 900 */ 304, 305, 77, 230, 127, 232, 67, 195, 19, 195, + /* 910 */ 195, 136, 23, 88, 75, 90, 141, 203, 93, 154, + /* 920 */ 155, 156, 208, 295, 60, 243, 22, 23, 19, 25, + /* 930 */ 218, 219, 43, 44, 45, 100, 47, 48, 49, 50, + /* 940 */ 51, 52, 53, 54, 55, 56, 57, 58, 183, 102, + /* 950 */ 96, 195, 43, 44, 45, 240, 47, 48, 49, 50, + /* 960 */ 51, 52, 53, 54, 55, 56, 57, 58, 114, 134, + /* 970 */ 131, 146, 25, 286, 120, 121, 122, 123, 124, 125, + /* 980 */ 126, 117, 118, 119, 313, 195, 132, 195, 317, 307, + /* 990 */ 195, 309, 103, 104, 105, 106, 107, 108, 109, 110, + /* 1000 */ 111, 112, 113, 114, 195, 195, 102, 195, 195, 195, + /* 1010 */ 218, 219, 103, 104, 105, 106, 107, 108, 109, 110, + /* 1020 */ 111, 112, 113, 114, 77, 233, 195, 60, 218, 219, + /* 1030 */ 218, 219, 218, 219, 23, 195, 25, 90, 243, 159, + /* 1040 */ 93, 161, 19, 233, 195, 233, 23, 233, 16, 218, + /* 1050 */ 219, 195, 243, 212, 213, 214, 262, 263, 218, 219, + /* 1060 */ 195, 271, 19, 307, 233, 309, 43, 44, 45, 160, + /* 1070 */ 47, 48, 49, 50, 51, 52, 53, 54, 
55, 56, + /* 1080 */ 57, 58, 195, 218, 219, 118, 43, 44, 45, 240, + /* 1090 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + /* 1100 */ 57, 58, 307, 195, 309, 218, 219, 263, 12, 195, + /* 1110 */ 78, 267, 80, 112, 113, 114, 307, 22, 309, 24, + /* 1120 */ 255, 281, 266, 27, 107, 108, 103, 104, 105, 106, + /* 1130 */ 107, 108, 109, 110, 111, 112, 113, 114, 42, 195, + /* 1140 */ 11, 22, 255, 24, 195, 195, 103, 104, 105, 106, + /* 1150 */ 107, 108, 109, 110, 111, 112, 113, 114, 19, 195, + /* 1160 */ 64, 195, 218, 219, 195, 313, 195, 218, 219, 317, + /* 1170 */ 74, 154, 195, 156, 195, 195, 19, 233, 23, 60, + /* 1180 */ 25, 24, 218, 219, 218, 219, 195, 218, 219, 218, + /* 1190 */ 219, 128, 129, 130, 162, 263, 19, 218, 219, 267, + /* 1200 */ 43, 44, 45, 160, 47, 48, 49, 50, 51, 52, + /* 1210 */ 53, 54, 55, 56, 57, 58, 19, 240, 228, 255, + /* 1220 */ 43, 44, 45, 25, 47, 48, 49, 50, 51, 52, + /* 1230 */ 53, 54, 55, 56, 57, 58, 135, 118, 137, 138, + /* 1240 */ 43, 44, 45, 22, 47, 48, 49, 50, 51, 52, + /* 1250 */ 53, 54, 55, 56, 57, 58, 117, 266, 129, 130, + /* 1260 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1270 */ 113, 114, 195, 195, 119, 295, 195, 206, 195, 195, + /* 1280 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1290 */ 113, 114, 195, 195, 195, 218, 219, 195, 195, 144, + /* 1300 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 1310 */ 113, 114, 241, 242, 67, 218, 219, 218, 219, 146, + /* 1320 */ 19, 218, 219, 240, 215, 254, 136, 256, 107, 108, + /* 1330 */ 195, 141, 255, 86, 128, 129, 130, 195, 165, 195, + /* 1340 */ 19, 143, 95, 272, 25, 44, 45, 266, 47, 48, + /* 1350 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 1360 */ 218, 219, 218, 219, 195, 12, 45, 195, 47, 48, + /* 1370 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + /* 1380 */ 27, 23, 7, 8, 9, 210, 211, 218, 219, 116, + /* 1390 */ 218, 219, 228, 16, 147, 42, 195, 295, 195, 19, + /* 1400 */ 20, 266, 22, 294, 103, 104, 105, 106, 107, 108, + /* 1410 */ 109, 110, 111, 112, 113, 114, 36, 64, 145, 218, + /* 1420 */ 219, 218, 219, 195, 103, 104, 105, 106, 107, 108, + /* 1430 */ 109, 110, 111, 112, 113, 114, 195, 154, 119, 156, + /* 1440 */ 60, 189, 190, 191, 192, 195, 218, 219, 195, 197, + /* 1450 */ 195, 199, 72, 195, 19, 78, 195, 80, 206, 218, + /* 1460 */ 219, 195, 82, 144, 210, 211, 195, 15, 218, 219, + /* 1470 */ 47, 218, 219, 218, 219, 259, 260, 195, 261, 218, + /* 1480 */ 219, 101, 302, 303, 218, 219, 195, 107, 108, 218, + /* 1490 */ 219, 150, 151, 241, 242, 115, 25, 117, 118, 119, + /* 1500 */ 218, 219, 122, 195, 146, 195, 254, 195, 256, 218, + /* 1510 */ 219, 246, 25, 61, 246, 19, 20, 195, 22, 139, + /* 1520 */ 140, 269, 257, 195, 266, 257, 218, 219, 218, 219, + /* 1530 */ 218, 219, 36, 246, 154, 155, 156, 157, 158, 116, + /* 1540 */ 218, 219, 195, 22, 257, 49, 218, 219, 23, 195, + /* 1550 */ 25, 195, 117, 301, 195, 25, 60, 195, 195, 23, + /* 1560 */ 195, 25, 195, 183, 24, 218, 219, 130, 72, 195, + /* 1570 */ 22, 195, 218, 219, 218, 219, 195, 218, 219, 195, + /* 1580 */ 218, 219, 86, 218, 219, 218, 219, 91, 19, 20, + /* 1590 */ 153, 22, 218, 219, 218, 219, 195, 101, 195, 218, + /* 1600 */ 219, 195, 195, 107, 108, 36, 23, 195, 25, 195, + /* 1610 */ 62, 115, 195, 117, 118, 119, 195, 146, 122, 218, + /* 1620 */ 219, 218, 219, 195, 218, 219, 19, 60, 122, 60, + /* 1630 */ 218, 219, 218, 219, 195, 218, 219, 150, 132, 218, + /* 1640 */ 219, 72, 195, 23, 195, 25, 218, 219, 195, 60, + /* 1650 */ 154, 155, 156, 157, 158, 86, 23, 195, 25, 195, + /* 1660 */ 91, 19, 20, 142, 22, 218, 219, 218, 219, 130, + /* 1670 
*/ 101, 218, 219, 143, 121, 122, 107, 108, 36, 183, + /* 1680 */ 218, 219, 142, 60, 115, 118, 117, 118, 119, 7, + /* 1690 */ 8, 122, 153, 23, 23, 25, 25, 23, 23, 25, + /* 1700 */ 25, 23, 60, 25, 23, 98, 25, 118, 84, 85, + /* 1710 */ 23, 23, 25, 25, 72, 154, 23, 156, 25, 23, + /* 1720 */ 228, 25, 195, 154, 155, 156, 157, 158, 86, 195, + /* 1730 */ 195, 258, 195, 91, 291, 322, 195, 195, 195, 195, + /* 1740 */ 195, 118, 195, 101, 195, 195, 195, 195, 238, 107, + /* 1750 */ 108, 195, 183, 195, 195, 195, 290, 115, 195, 117, + /* 1760 */ 118, 119, 244, 195, 122, 195, 195, 195, 195, 195, + /* 1770 */ 195, 258, 258, 258, 258, 193, 245, 300, 216, 274, + /* 1780 */ 247, 270, 270, 274, 296, 296, 248, 222, 262, 198, + /* 1790 */ 262, 274, 61, 274, 248, 231, 154, 155, 156, 157, + /* 1800 */ 158, 0, 1, 2, 247, 227, 5, 221, 221, 221, + /* 1810 */ 142, 10, 11, 12, 13, 14, 262, 262, 17, 202, + /* 1820 */ 300, 19, 20, 300, 22, 183, 247, 251, 251, 245, + /* 1830 */ 202, 30, 38, 32, 202, 152, 151, 22, 36, 43, + /* 1840 */ 236, 40, 18, 202, 239, 239, 18, 239, 239, 283, + /* 1850 */ 201, 150, 236, 202, 236, 201, 159, 202, 248, 248, + /* 1860 */ 248, 248, 60, 63, 201, 275, 273, 275, 273, 275, + /* 1870 */ 22, 286, 71, 223, 72, 202, 223, 297, 297, 202, + /* 1880 */ 79, 201, 116, 82, 220, 201, 220, 220, 65, 293, + /* 1890 */ 292, 229, 22, 166, 127, 226, 24, 114, 226, 223, + /* 1900 */ 99, 222, 202, 101, 285, 92, 220, 308, 83, 107, + /* 1910 */ 108, 220, 220, 316, 220, 285, 268, 115, 229, 117, + /* 1920 */ 118, 119, 223, 321, 122, 268, 149, 146, 22, 19, + /* 1930 */ 20, 202, 22, 159, 282, 134, 321, 148, 280, 147, + /* 1940 */ 139, 140, 252, 141, 25, 204, 36, 252, 13, 251, + /* 1950 */ 196, 248, 250, 249, 196, 6, 154, 155, 156, 157, + /* 1960 */ 158, 209, 194, 194, 163, 194, 306, 306, 303, 224, + /* 1970 */ 60, 215, 215, 209, 215, 215, 215, 224, 216, 216, + /* 1980 */ 4, 209, 72, 3, 22, 183, 164, 15, 23, 16, + /* 1990 */ 23, 140, 152, 131, 25, 24, 143, 20, 16, 145, + /* 2000 */ 1, 143, 131, 62, 131, 37, 54, 152, 54, 54, + /* 2010 */ 54, 101, 131, 117, 1, 34, 142, 107, 108, 5, + /* 2020 */ 22, 116, 162, 76, 41, 115, 69, 117, 118, 119, + /* 2030 */ 1, 2, 122, 25, 5, 69, 142, 116, 20, 10, + /* 2040 */ 11, 12, 13, 14, 24, 19, 17, 132, 5, 126, + /* 2050 */ 22, 141, 68, 10, 11, 12, 13, 14, 22, 30, + /* 2060 */ 17, 32, 22, 22, 154, 155, 156, 157, 158, 40, + /* 2070 */ 23, 68, 60, 30, 24, 32, 97, 28, 22, 68, + /* 2080 */ 23, 37, 34, 40, 150, 22, 25, 23, 23, 23, + /* 2090 */ 22, 98, 142, 183, 23, 23, 34, 22, 25, 89, + /* 2100 */ 71, 34, 117, 144, 34, 22, 76, 76, 79, 87, + /* 2110 */ 34, 82, 34, 44, 71, 94, 34, 23, 25, 24, + /* 2120 */ 34, 25, 79, 23, 23, 82, 23, 23, 99, 143, + /* 2130 */ 143, 22, 25, 25, 23, 22, 11, 22, 22, 25, + /* 2140 */ 23, 23, 99, 22, 22, 136, 142, 142, 142, 25, + /* 2150 */ 23, 15, 1, 1, 323, 323, 323, 323, 323, 323, + /* 2160 */ 323, 323, 323, 134, 323, 323, 323, 323, 139, 140, + /* 2170 */ 323, 323, 323, 323, 323, 323, 323, 134, 323, 323, + /* 2180 */ 323, 323, 139, 140, 323, 323, 323, 323, 323, 323, + /* 2190 */ 323, 323, 163, 323, 323, 323, 323, 323, 323, 323, + /* 2200 */ 323, 323, 323, 323, 323, 323, 163, 323, 323, 323, + /* 2210 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2220 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2230 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2240 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2250 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2260 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 
2270 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2280 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2290 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2300 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2310 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2320 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2330 */ 323, 323, 323, 323, 323, 323, 323, 323, 323, 323, + /* 2340 */ 323, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2350 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2360 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2370 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2380 */ 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, + /* 2390 */ 187, 187, 187, 187, }; #define YY_SHIFT_COUNT (582) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2128) +#define YY_SHIFT_MAX (2152) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1792, 1588, 1494, 322, 322, 399, 306, 1319, 1339, 1430, - /* 10 */ 1828, 1828, 1828, 580, 399, 399, 399, 399, 399, 0, - /* 20 */ 0, 214, 1093, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 30 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1130, 1130, - /* 40 */ 365, 365, 55, 278, 436, 713, 713, 201, 201, 201, - /* 50 */ 201, 40, 111, 258, 361, 469, 512, 583, 622, 693, - /* 60 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, - /* 80 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1523, 1602, - /* 90 */ 1674, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 100 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 110 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 120 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 130 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, - /* 140 */ 137, 181, 181, 181, 181, 181, 181, 181, 96, 222, - /* 150 */ 143, 477, 713, 1133, 1268, 713, 713, 79, 79, 713, - /* 160 */ 770, 83, 65, 65, 65, 288, 162, 162, 2142, 2142, - /* 170 */ 696, 696, 696, 238, 474, 474, 474, 474, 1217, 1217, - /* 180 */ 678, 477, 324, 398, 713, 713, 713, 713, 713, 713, - /* 190 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, - /* 200 */ 713, 713, 713, 1220, 366, 366, 713, 917, 283, 283, - /* 210 */ 434, 434, 605, 605, 1298, 2142, 2142, 2142, 2142, 2142, - /* 220 */ 2142, 2142, 1179, 1157, 1157, 487, 527, 585, 645, 749, - /* 230 */ 914, 968, 752, 713, 713, 713, 713, 713, 713, 713, - /* 240 */ 713, 713, 713, 303, 713, 713, 713, 713, 713, 713, - /* 250 */ 713, 713, 713, 713, 713, 713, 797, 797, 797, 713, - /* 260 */ 713, 713, 959, 713, 713, 713, 1169, 1271, 713, 713, - /* 270 */ 1330, 713, 713, 713, 713, 713, 713, 713, 713, 629, - /* 280 */ 7, 91, 876, 876, 876, 876, 953, 91, 91, 1246, - /* 290 */ 1065, 1106, 1374, 1329, 1348, 468, 1348, 1394, 785, 1329, - /* 300 */ 1329, 785, 1329, 468, 1394, 859, 854, 1402, 1449, 1449, - /* 310 */ 1449, 1173, 1173, 1173, 1173, 1355, 1355, 1030, 1341, 405, - /* 320 */ 1230, 1795, 1795, 1711, 1711, 1829, 1829, 1711, 1717, 1719, - /* 330 */ 1850, 1833, 1860, 1860, 1860, 1860, 1711, 1868, 1740, 1719, - /* 340 */ 1719, 1740, 1850, 1833, 1740, 1833, 1740, 1711, 1868, 1745, - /* 350 */ 1843, 1711, 1868, 1887, 1711, 1868, 1711, 1868, 1887, 1801, - /* 360 */ 1801, 1801, 1855, 1900, 1900, 1887, 1801, 1797, 1801, 1855, - /* 370 */ 1801, 1801, 1761, 1912, 1830, 1830, 1887, 1711, 1862, 1862, - /* 380 */ 1875, 1875, 1810, 1815, 1940, 1711, 1807, 1810, 1821, 
1823, - /* 390 */ 1740, 1945, 1963, 1963, 1974, 1974, 1974, 2142, 2142, 2142, - /* 400 */ 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, 2142, - /* 410 */ 2142, 2142, 20, 1224, 256, 1111, 1115, 1114, 1192, 1496, - /* 420 */ 1424, 1505, 1427, 355, 1383, 1537, 1506, 1538, 1553, 1583, - /* 430 */ 1584, 1591, 1625, 541, 1445, 1562, 1450, 1572, 1515, 1428, - /* 440 */ 1532, 1592, 1629, 1520, 1630, 1639, 1510, 1544, 1662, 1675, - /* 450 */ 1551, 48, 1996, 2001, 1983, 1844, 1993, 1994, 1986, 1989, - /* 460 */ 1874, 1863, 1885, 1991, 1991, 1995, 1876, 1997, 1877, 2004, - /* 470 */ 2021, 1881, 1894, 1991, 1895, 1965, 1990, 1991, 1878, 1975, - /* 480 */ 1977, 1978, 1979, 1903, 1918, 2002, 1896, 2034, 2033, 2017, - /* 490 */ 1925, 1880, 1976, 2018, 1980, 1967, 2005, 1904, 1932, 2025, - /* 500 */ 2030, 2032, 1921, 1928, 2035, 1987, 2036, 2037, 2038, 2040, - /* 510 */ 1988, 2006, 2039, 1960, 2041, 2042, 1999, 2023, 2044, 2043, - /* 520 */ 1919, 2048, 2049, 2050, 2046, 2051, 2053, 1981, 1935, 2056, - /* 530 */ 2057, 1966, 2047, 2061, 1941, 2060, 2052, 2054, 2055, 2058, - /* 540 */ 2003, 2012, 2007, 2059, 2015, 2008, 2062, 2071, 2073, 2074, - /* 550 */ 2072, 2075, 2065, 1962, 1964, 2079, 2060, 2082, 2084, 2085, - /* 560 */ 2087, 2086, 2089, 2088, 2091, 2093, 2099, 2094, 2095, 2096, - /* 570 */ 2097, 2100, 2101, 2102, 1998, 1984, 1985, 2000, 2103, 2098, - /* 580 */ 2109, 2117, 2128, + /* 0 */ 2029, 1801, 2043, 1380, 1380, 318, 271, 1496, 1569, 1642, + /* 10 */ 702, 702, 702, 740, 318, 318, 318, 318, 318, 0, + /* 20 */ 0, 216, 1177, 702, 702, 702, 702, 702, 702, 702, + /* 30 */ 702, 702, 702, 702, 702, 702, 702, 702, 503, 503, + /* 40 */ 111, 111, 217, 287, 348, 610, 610, 736, 736, 736, + /* 50 */ 736, 40, 112, 320, 340, 445, 489, 593, 637, 741, + /* 60 */ 785, 889, 909, 1023, 1043, 1157, 1177, 1177, 1177, 1177, + /* 70 */ 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, 1177, + /* 80 */ 1177, 1177, 1177, 1177, 1197, 1177, 1301, 1321, 1321, 554, + /* 90 */ 1802, 1910, 702, 702, 702, 702, 702, 702, 702, 702, + /* 100 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 110 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 120 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 130 */ 702, 702, 702, 702, 702, 702, 702, 702, 702, 702, + /* 140 */ 702, 702, 138, 198, 198, 198, 198, 198, 198, 198, + /* 150 */ 183, 99, 169, 549, 610, 151, 542, 610, 610, 1017, + /* 160 */ 1017, 610, 1001, 350, 464, 464, 464, 586, 1, 1, + /* 170 */ 2207, 2207, 854, 854, 854, 465, 694, 694, 694, 694, + /* 180 */ 1096, 1096, 825, 549, 847, 904, 610, 610, 610, 610, + /* 190 */ 610, 610, 610, 610, 610, 610, 610, 610, 610, 610, + /* 200 */ 610, 610, 610, 610, 610, 488, 947, 947, 610, 1129, + /* 210 */ 495, 495, 1139, 1139, 967, 967, 1173, 2207, 2207, 2207, + /* 220 */ 2207, 2207, 2207, 2207, 617, 765, 765, 697, 444, 708, + /* 230 */ 660, 745, 510, 663, 864, 610, 610, 610, 610, 610, + /* 240 */ 610, 610, 610, 610, 610, 188, 610, 610, 610, 610, + /* 250 */ 610, 610, 610, 610, 610, 610, 610, 610, 839, 839, + /* 260 */ 839, 610, 610, 610, 1155, 610, 610, 610, 1119, 1247, + /* 270 */ 610, 1353, 610, 610, 610, 610, 610, 610, 610, 610, + /* 280 */ 1063, 494, 1101, 291, 291, 291, 291, 1319, 1101, 1101, + /* 290 */ 775, 1221, 1375, 1452, 667, 1341, 1198, 1341, 1435, 1487, + /* 300 */ 667, 667, 1487, 667, 1198, 1435, 777, 1011, 1423, 584, + /* 310 */ 584, 584, 1273, 1273, 1273, 1273, 1471, 1471, 880, 1530, + /* 320 */ 1190, 1095, 1731, 1731, 1668, 1668, 1794, 1794, 1668, 1683, + /* 330 */ 1685, 1815, 1796, 1824, 
1824, 1824, 1824, 1668, 1828, 1701, + /* 340 */ 1685, 1685, 1701, 1815, 1796, 1701, 1796, 1701, 1668, 1828, + /* 350 */ 1697, 1800, 1668, 1828, 1848, 1668, 1828, 1668, 1828, 1848, + /* 360 */ 1766, 1766, 1766, 1823, 1870, 1870, 1848, 1766, 1767, 1766, + /* 370 */ 1823, 1766, 1766, 1727, 1872, 1783, 1783, 1848, 1668, 1813, + /* 380 */ 1813, 1825, 1825, 1777, 1781, 1906, 1668, 1774, 1777, 1789, + /* 390 */ 1792, 1701, 1919, 1935, 1935, 1949, 1949, 1949, 2207, 2207, + /* 400 */ 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, 2207, + /* 410 */ 2207, 2207, 2207, 69, 1032, 79, 357, 1377, 1206, 400, + /* 420 */ 1525, 835, 332, 1540, 1437, 1539, 1536, 1548, 1583, 1620, + /* 430 */ 1633, 1670, 1671, 1674, 1567, 1553, 1682, 1506, 1675, 1358, + /* 440 */ 1607, 1589, 1678, 1681, 1624, 1687, 1688, 1283, 1561, 1693, + /* 450 */ 1696, 1623, 1521, 1976, 1980, 1962, 1822, 1972, 1973, 1965, + /* 460 */ 1967, 1851, 1840, 1862, 1969, 1969, 1971, 1853, 1977, 1854, + /* 470 */ 1982, 1999, 1858, 1871, 1969, 1873, 1941, 1968, 1969, 1855, + /* 480 */ 1952, 1954, 1955, 1956, 1881, 1896, 1981, 1874, 2013, 2014, + /* 490 */ 1998, 1905, 1860, 1957, 2008, 1966, 1947, 1983, 1894, 1921, + /* 500 */ 2020, 2018, 2026, 1915, 1923, 2028, 1984, 2036, 2040, 2047, + /* 510 */ 2041, 2003, 2012, 2050, 1979, 2049, 2056, 2011, 2044, 2057, + /* 520 */ 2048, 1934, 2063, 2064, 2065, 2061, 2066, 2068, 1993, 1950, + /* 530 */ 2071, 2072, 1985, 2062, 2075, 1959, 2073, 2067, 2070, 2076, + /* 540 */ 2078, 2010, 2030, 2022, 2069, 2031, 2021, 2082, 2094, 2083, + /* 550 */ 2095, 2093, 2096, 2086, 1986, 1987, 2100, 2073, 2101, 2103, + /* 560 */ 2104, 2109, 2107, 2108, 2111, 2113, 2125, 2115, 2116, 2117, + /* 570 */ 2118, 2121, 2122, 2114, 2009, 2004, 2005, 2006, 2124, 2127, + /* 580 */ 2136, 2151, 2152, }; -#define YY_REDUCE_COUNT (411) -#define YY_REDUCE_MIN (-275) -#define YY_REDUCE_MAX (1798) +#define YY_REDUCE_COUNT (412) +#define YY_REDUCE_MIN (-277) +#define YY_REDUCE_MAX (1772) static const short yy_reduce_ofst[] = { - /* 0 */ -71, 194, 343, 835, -180, -177, 838, -194, -188, -185, - /* 10 */ -183, 82, 183, -65, 133, 245, 346, 407, 458, -178, - /* 20 */ 75, -275, -4, 310, 312, 489, 575, 596, 463, 686, - /* 30 */ 707, 725, 780, 1098, 856, 778, 1059, 1090, 708, 887, - /* 40 */ 86, 448, 980, 630, 680, 681, 684, 796, 801, 796, - /* 50 */ 801, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 60 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 70 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 80 */ -261, -261, -261, -261, -261, -261, -261, -261, 391, 886, - /* 90 */ 888, 1013, 1016, 1081, 1087, 1151, 1159, 1177, 1185, 1188, - /* 100 */ 1190, 1194, 1197, 1203, 1247, 1260, 1264, 1267, 1269, 1273, - /* 110 */ 1315, 1322, 1335, 1337, 1356, 1362, 1418, 1425, 1453, 1457, - /* 120 */ 1465, 1473, 1487, 1495, 1507, 1517, 1521, 1534, 1543, 1546, - /* 130 */ 1549, 1552, 1554, 1560, 1581, 1590, 1593, 1595, 1621, 1623, - /* 140 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, - /* 150 */ -261, -186, -117, 260, 263, 460, 631, -74, 497, -181, - /* 160 */ -261, 939, 176, 274, 338, 676, -261, -261, -261, -261, - /* 170 */ -212, -212, -212, -184, 149, 777, 1061, 1103, 265, 419, - /* 180 */ -254, 670, 677, 677, -11, -129, 184, 488, 736, 789, - /* 190 */ 805, 844, 403, 529, 579, 668, 783, 841, 1158, 1112, - /* 200 */ 806, 861, 1095, 846, 839, 1031, -189, 1077, 1080, 1116, - /* 210 */ 1084, 1156, 1139, 1221, 46, 1099, 1037, 1118, 1171, 1214, - /* 220 */ 1210, 1258, -210, -190, -176, -115, 117, 
262, 376, 490, - /* 230 */ 511, 520, 618, 639, 743, 901, 907, 958, 1014, 1055, - /* 240 */ 1108, 1193, 1244, 720, 1248, 1277, 1324, 1347, 1417, 1431, - /* 250 */ 1432, 1440, 1451, 1452, 1463, 1478, 1286, 1350, 1369, 1490, - /* 260 */ 1498, 1501, 773, 1509, 1513, 1528, 1292, 1367, 1535, 1536, - /* 270 */ 1477, 1542, 376, 1547, 1550, 1555, 1559, 1568, 1571, 1441, - /* 280 */ 1443, 1474, 1511, 1516, 1519, 1522, 773, 1474, 1474, 1503, - /* 290 */ 1567, 1594, 1484, 1527, 1556, 1570, 1557, 1524, 1573, 1545, - /* 300 */ 1548, 1576, 1561, 1587, 1540, 1575, 1606, 1611, 1622, 1624, - /* 310 */ 1626, 1582, 1597, 1598, 1599, 1601, 1603, 1563, 1608, 1605, - /* 320 */ 1604, 1564, 1566, 1655, 1660, 1578, 1579, 1665, 1586, 1607, - /* 330 */ 1610, 1642, 1641, 1645, 1646, 1647, 1679, 1688, 1644, 1618, - /* 340 */ 1619, 1648, 1628, 1659, 1649, 1663, 1650, 1700, 1702, 1612, - /* 350 */ 1615, 1706, 1708, 1689, 1709, 1712, 1713, 1715, 1691, 1698, - /* 360 */ 1699, 1701, 1693, 1704, 1707, 1705, 1714, 1703, 1718, 1710, - /* 370 */ 1720, 1721, 1632, 1634, 1664, 1666, 1729, 1751, 1635, 1636, - /* 380 */ 1692, 1694, 1716, 1722, 1684, 1763, 1685, 1723, 1724, 1727, - /* 390 */ 1730, 1768, 1783, 1784, 1793, 1794, 1796, 1683, 1686, 1690, - /* 400 */ 1782, 1779, 1780, 1781, 1785, 1788, 1774, 1775, 1786, 1787, - /* 410 */ 1789, 1798, + /* 0 */ -67, 1252, -64, -178, -181, 160, 1071, 143, -184, 137, + /* 10 */ 218, 220, 222, -174, 229, 268, 272, 275, 324, -208, + /* 20 */ 242, -277, -39, 81, 537, 792, 810, 812, -189, 814, + /* 30 */ 831, 163, 865, 944, 887, 840, 964, 1077, -187, 292, + /* 40 */ -133, 274, 673, 558, 682, 795, 809, -238, -232, -238, + /* 50 */ -232, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 60 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 70 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 329, + /* 80 */ 329, 329, 329, 329, 329, 329, 329, 329, 329, 557, + /* 90 */ 712, 949, 966, 969, 971, 979, 1097, 1099, 1103, 1142, + /* 100 */ 1144, 1169, 1172, 1201, 1203, 1228, 1241, 1250, 1253, 1255, + /* 110 */ 1261, 1266, 1271, 1282, 1291, 1308, 1310, 1312, 1322, 1328, + /* 120 */ 1347, 1354, 1356, 1359, 1362, 1365, 1367, 1374, 1376, 1381, + /* 130 */ 1401, 1403, 1406, 1412, 1414, 1417, 1421, 1428, 1447, 1449, + /* 140 */ 1453, 1462, 329, 329, 329, 329, 329, 329, 329, 329, + /* 150 */ 329, 329, 329, -22, -159, 475, -220, 756, 38, 501, + /* 160 */ 841, 714, 329, 118, 337, 349, 363, -56, 329, 329, + /* 170 */ 329, 329, -205, -205, -205, 687, -172, -130, -57, 790, + /* 180 */ 397, 528, -271, 136, 596, 596, 90, 316, 522, 541, + /* 190 */ -37, 715, 849, 977, 628, 856, 980, 991, 1081, 1102, + /* 200 */ 1135, 1083, -162, 208, 1258, 794, -86, 159, 41, 1109, + /* 210 */ 671, 852, 844, 932, 1175, 1254, 480, 1180, 100, 258, + /* 220 */ 1265, 1268, 1216, 1287, -139, 317, 344, 63, 339, 423, + /* 230 */ 563, 636, 676, 813, 908, 914, 950, 1078, 1084, 1098, + /* 240 */ 1363, 1384, 1407, 1439, 1464, 411, 1527, 1534, 1535, 1537, + /* 250 */ 1541, 1542, 1543, 1544, 1545, 1547, 1549, 1550, 990, 1164, + /* 260 */ 1492, 1551, 1552, 1556, 1217, 1558, 1559, 1560, 1473, 1413, + /* 270 */ 1563, 1510, 1568, 563, 1570, 1571, 1572, 1573, 1574, 1575, + /* 280 */ 1443, 1466, 1518, 1513, 1514, 1515, 1516, 1217, 1518, 1518, + /* 290 */ 1531, 1562, 1582, 1477, 1505, 1511, 1533, 1512, 1488, 1538, + /* 300 */ 1509, 1517, 1546, 1519, 1557, 1489, 1565, 1564, 1578, 1586, + /* 310 */ 1587, 1588, 1526, 1528, 1554, 1555, 1576, 1577, 1566, 1579, + /* 320 */ 1584, 1591, 1520, 1523, 1617, 1628, 1580, 1581, 1632, 1585, + /* 330 */ 1590, 1593, 
1604, 1605, 1606, 1608, 1609, 1641, 1649, 1610, + /* 340 */ 1592, 1594, 1611, 1595, 1616, 1612, 1618, 1613, 1651, 1654, + /* 350 */ 1596, 1598, 1655, 1663, 1650, 1673, 1680, 1677, 1684, 1653, + /* 360 */ 1664, 1666, 1667, 1662, 1669, 1672, 1676, 1686, 1679, 1691, + /* 370 */ 1689, 1692, 1694, 1597, 1599, 1619, 1630, 1699, 1700, 1602, + /* 380 */ 1615, 1648, 1657, 1690, 1698, 1658, 1729, 1652, 1695, 1702, + /* 390 */ 1704, 1703, 1741, 1754, 1758, 1768, 1769, 1771, 1660, 1661, + /* 400 */ 1665, 1752, 1756, 1757, 1759, 1760, 1764, 1745, 1753, 1762, + /* 410 */ 1763, 1761, 1772, }; static const YYACTIONTYPE yy_default[] = { /* 0 */ 1663, 1663, 1663, 1491, 1254, 1367, 1254, 1254, 1254, 1254, @@ -173648,57 +175410,57 @@ static const YYACTIONTYPE yy_default[] = { /* 30 */ 1254, 1254, 1254, 1254, 1254, 1490, 1254, 1254, 1254, 1254, /* 40 */ 1578, 1578, 1254, 1254, 1254, 1254, 1254, 1563, 1562, 1254, /* 50 */ 1254, 1254, 1406, 1254, 1413, 1254, 1254, 1254, 1254, 1254, - /* 60 */ 1492, 1493, 1254, 1254, 1254, 1543, 1545, 1508, 1420, 1419, - /* 70 */ 1418, 1417, 1526, 1385, 1411, 1404, 1408, 1487, 1488, 1486, - /* 80 */ 1641, 1493, 1492, 1254, 1407, 1455, 1471, 1454, 1254, 1254, + /* 60 */ 1492, 1493, 1254, 1254, 1254, 1254, 1543, 1545, 1508, 1420, + /* 70 */ 1419, 1418, 1417, 1526, 1385, 1411, 1404, 1408, 1487, 1488, + /* 80 */ 1486, 1641, 1493, 1492, 1254, 1407, 1455, 1471, 1454, 1254, /* 90 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 100 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 110 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 120 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 130 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 140 */ 1463, 1470, 1469, 1468, 1477, 1467, 1464, 1457, 1456, 1458, - /* 150 */ 1459, 1278, 1254, 1275, 1329, 1254, 1254, 1254, 1254, 1254, - /* 160 */ 1460, 1287, 1448, 1447, 1446, 1254, 1474, 1461, 1473, 1472, - /* 170 */ 1551, 1615, 1614, 1509, 1254, 1254, 1254, 1254, 1254, 1254, - /* 180 */ 1578, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 140 */ 1254, 1254, 1463, 1470, 1469, 1468, 1477, 1467, 1464, 1457, + /* 150 */ 1456, 1458, 1459, 1278, 1254, 1275, 1329, 1254, 1254, 1254, + /* 160 */ 1254, 1254, 1460, 1287, 1448, 1447, 1446, 1254, 1474, 1461, + /* 170 */ 1473, 1472, 1551, 1615, 1614, 1509, 1254, 1254, 1254, 1254, + /* 180 */ 1254, 1254, 1578, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 190 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 200 */ 1254, 1254, 1254, 1387, 1578, 1578, 1254, 1287, 1578, 1578, - /* 210 */ 1388, 1388, 1283, 1283, 1391, 1558, 1358, 1358, 1358, 1358, - /* 220 */ 1367, 1358, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 230 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1548, 1546, 1254, - /* 240 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 200 */ 1254, 1254, 1254, 1254, 1254, 1387, 1578, 1578, 1254, 1287, + /* 210 */ 1578, 1578, 1388, 1388, 1283, 1283, 1391, 1558, 1358, 1358, + /* 220 */ 1358, 1358, 1367, 1358, 1254, 1254, 1254, 1254, 1254, 1254, + /* 230 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1548, + /* 240 */ 1546, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 250 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 260 */ 1254, 1254, 1254, 1254, 1254, 1254, 1363, 1254, 1254, 1254, - /* 270 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1608, 1254, - /* 280 */ 1521, 1343, 1363, 1363, 1363, 1363, 1365, 1344, 1342, 1357, - /* 290 
*/ 1288, 1261, 1655, 1423, 1412, 1364, 1412, 1652, 1410, 1423, - /* 300 */ 1423, 1410, 1423, 1364, 1652, 1304, 1630, 1299, 1397, 1397, - /* 310 */ 1397, 1387, 1387, 1387, 1387, 1391, 1391, 1489, 1364, 1357, - /* 320 */ 1254, 1655, 1655, 1373, 1373, 1654, 1654, 1373, 1509, 1638, - /* 330 */ 1432, 1332, 1338, 1338, 1338, 1338, 1373, 1272, 1410, 1638, - /* 340 */ 1638, 1410, 1432, 1332, 1410, 1332, 1410, 1373, 1272, 1525, - /* 350 */ 1649, 1373, 1272, 1499, 1373, 1272, 1373, 1272, 1499, 1330, - /* 360 */ 1330, 1330, 1319, 1254, 1254, 1499, 1330, 1304, 1330, 1319, - /* 370 */ 1330, 1330, 1596, 1254, 1503, 1503, 1499, 1373, 1588, 1588, - /* 380 */ 1400, 1400, 1405, 1391, 1494, 1373, 1254, 1405, 1403, 1401, - /* 390 */ 1410, 1322, 1611, 1611, 1607, 1607, 1607, 1660, 1660, 1558, - /* 400 */ 1623, 1287, 1287, 1287, 1287, 1623, 1306, 1306, 1288, 1288, - /* 410 */ 1287, 1623, 1254, 1254, 1254, 1254, 1254, 1254, 1618, 1254, - /* 420 */ 1553, 1510, 1377, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 430 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1564, - /* 440 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 450 */ 1254, 1437, 1254, 1257, 1555, 1254, 1254, 1254, 1254, 1254, - /* 460 */ 1254, 1254, 1254, 1414, 1415, 1378, 1254, 1254, 1254, 1254, - /* 470 */ 1254, 1254, 1254, 1429, 1254, 1254, 1254, 1424, 1254, 1254, - /* 480 */ 1254, 1254, 1254, 1254, 1254, 1254, 1651, 1254, 1254, 1254, - /* 490 */ 1254, 1254, 1254, 1524, 1523, 1254, 1254, 1375, 1254, 1254, + /* 260 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1363, 1254, + /* 270 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1608, + /* 280 */ 1254, 1521, 1343, 1363, 1363, 1363, 1363, 1365, 1344, 1342, + /* 290 */ 1357, 1288, 1261, 1655, 1423, 1412, 1364, 1412, 1652, 1410, + /* 300 */ 1423, 1423, 1410, 1423, 1364, 1652, 1304, 1630, 1299, 1397, + /* 310 */ 1397, 1397, 1387, 1387, 1387, 1387, 1391, 1391, 1489, 1364, + /* 320 */ 1357, 1254, 1655, 1655, 1373, 1373, 1654, 1654, 1373, 1509, + /* 330 */ 1638, 1432, 1332, 1338, 1338, 1338, 1338, 1373, 1272, 1410, + /* 340 */ 1638, 1638, 1410, 1432, 1332, 1410, 1332, 1410, 1373, 1272, + /* 350 */ 1525, 1649, 1373, 1272, 1499, 1373, 1272, 1373, 1272, 1499, + /* 360 */ 1330, 1330, 1330, 1319, 1254, 1254, 1499, 1330, 1304, 1330, + /* 370 */ 1319, 1330, 1330, 1596, 1254, 1503, 1503, 1499, 1373, 1588, + /* 380 */ 1588, 1400, 1400, 1405, 1391, 1494, 1373, 1254, 1405, 1403, + /* 390 */ 1401, 1410, 1322, 1611, 1611, 1607, 1607, 1607, 1660, 1660, + /* 400 */ 1558, 1623, 1287, 1287, 1287, 1287, 1623, 1306, 1306, 1288, + /* 410 */ 1288, 1287, 1623, 1254, 1254, 1254, 1254, 1254, 1254, 1618, + /* 420 */ 1254, 1553, 1510, 1377, 1254, 1254, 1254, 1254, 1254, 1254, + /* 430 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 440 */ 1564, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 450 */ 1254, 1254, 1437, 1254, 1257, 1555, 1254, 1254, 1254, 1254, + /* 460 */ 1254, 1254, 1254, 1254, 1414, 1415, 1378, 1254, 1254, 1254, + /* 470 */ 1254, 1254, 1254, 1254, 1429, 1254, 1254, 1254, 1424, 1254, + /* 480 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1651, 1254, 1254, + /* 490 */ 1254, 1254, 1254, 1254, 1524, 1523, 1254, 1254, 1375, 1254, /* 500 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 510 */ 1254, 1302, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 510 */ 1254, 1254, 1302, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 520 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 530 */ 1254, 1254, 1254, 1254, 
1254, 1402, 1254, 1254, 1254, 1254, + /* 530 */ 1254, 1254, 1254, 1254, 1254, 1254, 1402, 1254, 1254, 1254, /* 540 */ 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, - /* 550 */ 1593, 1392, 1254, 1254, 1254, 1254, 1642, 1254, 1254, 1254, - /* 560 */ 1254, 1352, 1254, 1254, 1254, 1254, 1254, 1254, 1254, 1254, + /* 550 */ 1254, 1593, 1392, 1254, 1254, 1254, 1254, 1642, 1254, 1254, + /* 560 */ 1254, 1254, 1352, 1254, 1254, 1254, 1254, 1254, 1254, 1254, /* 570 */ 1254, 1254, 1254, 1634, 1346, 1438, 1254, 1441, 1276, 1254, /* 580 */ 1266, 1254, 1254, }; @@ -173722,52 +175484,53 @@ static const YYACTIONTYPE yy_default[] = { static const YYCODETYPE yyFallback[] = { 0, /* $ => nothing */ 0, /* SEMI => nothing */ - 59, /* EXPLAIN => ID */ - 59, /* QUERY => ID */ - 59, /* PLAN => ID */ - 59, /* BEGIN => ID */ + 60, /* EXPLAIN => ID */ + 60, /* QUERY => ID */ + 60, /* PLAN => ID */ + 60, /* BEGIN => ID */ 0, /* TRANSACTION => nothing */ - 59, /* DEFERRED => ID */ - 59, /* IMMEDIATE => ID */ - 59, /* EXCLUSIVE => ID */ + 60, /* DEFERRED => ID */ + 60, /* IMMEDIATE => ID */ + 60, /* EXCLUSIVE => ID */ 0, /* COMMIT => nothing */ - 59, /* END => ID */ - 59, /* ROLLBACK => ID */ - 59, /* SAVEPOINT => ID */ - 59, /* RELEASE => ID */ + 60, /* END => ID */ + 60, /* ROLLBACK => ID */ + 60, /* SAVEPOINT => ID */ + 60, /* RELEASE => ID */ 0, /* TO => nothing */ 0, /* TABLE => nothing */ 0, /* CREATE => nothing */ - 59, /* IF => ID */ + 60, /* IF => ID */ 0, /* NOT => nothing */ 0, /* EXISTS => nothing */ - 59, /* TEMP => ID */ + 60, /* TEMP => ID */ 0, /* LP => nothing */ 0, /* RP => nothing */ 0, /* AS => nothing */ 0, /* COMMA => nothing */ - 59, /* WITHOUT => ID */ - 59, /* ABORT => ID */ - 59, /* ACTION => ID */ - 59, /* AFTER => ID */ - 59, /* ANALYZE => ID */ - 59, /* ASC => ID */ - 59, /* ATTACH => ID */ - 59, /* BEFORE => ID */ - 59, /* BY => ID */ - 59, /* CASCADE => ID */ - 59, /* CAST => ID */ - 59, /* CONFLICT => ID */ - 59, /* DATABASE => ID */ - 59, /* DESC => ID */ - 59, /* DETACH => ID */ - 59, /* EACH => ID */ - 59, /* FAIL => ID */ + 60, /* WITHOUT => ID */ + 60, /* ABORT => ID */ + 60, /* ACTION => ID */ + 60, /* AFTER => ID */ + 60, /* ANALYZE => ID */ + 60, /* ASC => ID */ + 60, /* ATTACH => ID */ + 60, /* BEFORE => ID */ + 60, /* BY => ID */ + 60, /* CASCADE => ID */ + 60, /* CAST => ID */ + 60, /* CONFLICT => ID */ + 60, /* DATABASE => ID */ + 60, /* DESC => ID */ + 60, /* DETACH => ID */ + 60, /* EACH => ID */ + 60, /* FAIL => ID */ 0, /* OR => nothing */ 0, /* AND => nothing */ 0, /* IS => nothing */ - 59, /* MATCH => ID */ - 59, /* LIKE_KW => ID */ + 0, /* ISNOT => nothing */ + 60, /* MATCH => ID */ + 60, /* LIKE_KW => ID */ 0, /* BETWEEN => nothing */ 0, /* IN => nothing */ 0, /* ISNULL => nothing */ @@ -173780,47 +175543,47 @@ static const YYCODETYPE yyFallback[] = { 0, /* GE => nothing */ 0, /* ESCAPE => nothing */ 0, /* ID => nothing */ - 59, /* COLUMNKW => ID */ - 59, /* DO => ID */ - 59, /* FOR => ID */ - 59, /* IGNORE => ID */ - 59, /* INITIALLY => ID */ - 59, /* INSTEAD => ID */ - 59, /* NO => ID */ - 59, /* KEY => ID */ - 59, /* OF => ID */ - 59, /* OFFSET => ID */ - 59, /* PRAGMA => ID */ - 59, /* RAISE => ID */ - 59, /* RECURSIVE => ID */ - 59, /* REPLACE => ID */ - 59, /* RESTRICT => ID */ - 59, /* ROW => ID */ - 59, /* ROWS => ID */ - 59, /* TRIGGER => ID */ - 59, /* VACUUM => ID */ - 59, /* VIEW => ID */ - 59, /* VIRTUAL => ID */ - 59, /* WITH => ID */ - 59, /* NULLS => ID */ - 59, /* FIRST => ID */ - 59, /* LAST => ID */ - 59, /* CURRENT => ID */ - 59, /* 
FOLLOWING => ID */ - 59, /* PARTITION => ID */ - 59, /* PRECEDING => ID */ - 59, /* RANGE => ID */ - 59, /* UNBOUNDED => ID */ - 59, /* EXCLUDE => ID */ - 59, /* GROUPS => ID */ - 59, /* OTHERS => ID */ - 59, /* TIES => ID */ - 59, /* GENERATED => ID */ - 59, /* ALWAYS => ID */ - 59, /* MATERIALIZED => ID */ - 59, /* REINDEX => ID */ - 59, /* RENAME => ID */ - 59, /* CTIME_KW => ID */ + 60, /* COLUMNKW => ID */ + 60, /* DO => ID */ + 60, /* FOR => ID */ + 60, /* IGNORE => ID */ + 60, /* INITIALLY => ID */ + 60, /* INSTEAD => ID */ + 60, /* NO => ID */ + 60, /* KEY => ID */ + 60, /* OF => ID */ + 60, /* OFFSET => ID */ + 60, /* PRAGMA => ID */ + 60, /* RAISE => ID */ + 60, /* RECURSIVE => ID */ + 60, /* REPLACE => ID */ + 60, /* RESTRICT => ID */ + 60, /* ROW => ID */ + 60, /* ROWS => ID */ + 60, /* TRIGGER => ID */ + 60, /* VACUUM => ID */ + 60, /* VIEW => ID */ + 60, /* VIRTUAL => ID */ + 60, /* WITH => ID */ + 60, /* NULLS => ID */ + 60, /* FIRST => ID */ + 60, /* LAST => ID */ + 60, /* CURRENT => ID */ + 60, /* FOLLOWING => ID */ + 60, /* PARTITION => ID */ + 60, /* PRECEDING => ID */ + 60, /* RANGE => ID */ + 60, /* UNBOUNDED => ID */ + 60, /* EXCLUDE => ID */ + 60, /* GROUPS => ID */ + 60, /* OTHERS => ID */ + 60, /* TIES => ID */ + 60, /* GENERATED => ID */ + 60, /* ALWAYS => ID */ + 60, /* MATERIALIZED => ID */ + 60, /* REINDEX => ID */ + 60, /* RENAME => ID */ + 60, /* CTIME_KW => ID */ 0, /* ANY => nothing */ 0, /* BITAND => nothing */ 0, /* BITOR => nothing */ @@ -173891,7 +175654,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* AGG_FUNCTION => nothing */ 0, /* AGG_COLUMN => nothing */ 0, /* TRUEFALSE => nothing */ - 0, /* ISNOT => nothing */ 0, /* FUNCTION => nothing */ 0, /* UPLUS => nothing */ 0, /* UMINUS => nothing */ @@ -173905,6 +175667,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* ERROR => nothing */ 0, /* QNUMBER => nothing */ 0, /* SPACE => nothing */ + 0, /* COMMENT => nothing */ 0, /* ILLEGAL => nothing */ }; #endif /* YYFALLBACK */ @@ -174035,132 +175798,132 @@ static const char *const yyTokenName[] = { /* 43 */ "OR", /* 44 */ "AND", /* 45 */ "IS", - /* 46 */ "MATCH", - /* 47 */ "LIKE_KW", - /* 48 */ "BETWEEN", - /* 49 */ "IN", - /* 50 */ "ISNULL", - /* 51 */ "NOTNULL", - /* 52 */ "NE", - /* 53 */ "EQ", - /* 54 */ "GT", - /* 55 */ "LE", - /* 56 */ "LT", - /* 57 */ "GE", - /* 58 */ "ESCAPE", - /* 59 */ "ID", - /* 60 */ "COLUMNKW", - /* 61 */ "DO", - /* 62 */ "FOR", - /* 63 */ "IGNORE", - /* 64 */ "INITIALLY", - /* 65 */ "INSTEAD", - /* 66 */ "NO", - /* 67 */ "KEY", - /* 68 */ "OF", - /* 69 */ "OFFSET", - /* 70 */ "PRAGMA", - /* 71 */ "RAISE", - /* 72 */ "RECURSIVE", - /* 73 */ "REPLACE", - /* 74 */ "RESTRICT", - /* 75 */ "ROW", - /* 76 */ "ROWS", - /* 77 */ "TRIGGER", - /* 78 */ "VACUUM", - /* 79 */ "VIEW", - /* 80 */ "VIRTUAL", - /* 81 */ "WITH", - /* 82 */ "NULLS", - /* 83 */ "FIRST", - /* 84 */ "LAST", - /* 85 */ "CURRENT", - /* 86 */ "FOLLOWING", - /* 87 */ "PARTITION", - /* 88 */ "PRECEDING", - /* 89 */ "RANGE", - /* 90 */ "UNBOUNDED", - /* 91 */ "EXCLUDE", - /* 92 */ "GROUPS", - /* 93 */ "OTHERS", - /* 94 */ "TIES", - /* 95 */ "GENERATED", - /* 96 */ "ALWAYS", - /* 97 */ "MATERIALIZED", - /* 98 */ "REINDEX", - /* 99 */ "RENAME", - /* 100 */ "CTIME_KW", - /* 101 */ "ANY", - /* 102 */ "BITAND", - /* 103 */ "BITOR", - /* 104 */ "LSHIFT", - /* 105 */ "RSHIFT", - /* 106 */ "PLUS", - /* 107 */ "MINUS", - /* 108 */ "STAR", - /* 109 */ "SLASH", - /* 110 */ "REM", - /* 111 */ "CONCAT", - /* 112 */ "PTR", - /* 113 */ "COLLATE", - /* 114 */ "BITNOT", - /* 115 */ 
"ON", - /* 116 */ "INDEXED", - /* 117 */ "STRING", - /* 118 */ "JOIN_KW", - /* 119 */ "CONSTRAINT", - /* 120 */ "DEFAULT", - /* 121 */ "NULL", - /* 122 */ "PRIMARY", - /* 123 */ "UNIQUE", - /* 124 */ "CHECK", - /* 125 */ "REFERENCES", - /* 126 */ "AUTOINCR", - /* 127 */ "INSERT", - /* 128 */ "DELETE", - /* 129 */ "UPDATE", - /* 130 */ "SET", - /* 131 */ "DEFERRABLE", - /* 132 */ "FOREIGN", - /* 133 */ "DROP", - /* 134 */ "UNION", - /* 135 */ "ALL", - /* 136 */ "EXCEPT", - /* 137 */ "INTERSECT", - /* 138 */ "SELECT", - /* 139 */ "VALUES", - /* 140 */ "DISTINCT", - /* 141 */ "DOT", - /* 142 */ "FROM", - /* 143 */ "JOIN", - /* 144 */ "USING", - /* 145 */ "ORDER", - /* 146 */ "GROUP", - /* 147 */ "HAVING", - /* 148 */ "LIMIT", - /* 149 */ "WHERE", - /* 150 */ "RETURNING", - /* 151 */ "INTO", - /* 152 */ "NOTHING", - /* 153 */ "FLOAT", - /* 154 */ "BLOB", - /* 155 */ "INTEGER", - /* 156 */ "VARIABLE", - /* 157 */ "CASE", - /* 158 */ "WHEN", - /* 159 */ "THEN", - /* 160 */ "ELSE", - /* 161 */ "INDEX", - /* 162 */ "ALTER", - /* 163 */ "ADD", - /* 164 */ "WINDOW", - /* 165 */ "OVER", - /* 166 */ "FILTER", - /* 167 */ "COLUMN", - /* 168 */ "AGG_FUNCTION", - /* 169 */ "AGG_COLUMN", - /* 170 */ "TRUEFALSE", - /* 171 */ "ISNOT", + /* 46 */ "ISNOT", + /* 47 */ "MATCH", + /* 48 */ "LIKE_KW", + /* 49 */ "BETWEEN", + /* 50 */ "IN", + /* 51 */ "ISNULL", + /* 52 */ "NOTNULL", + /* 53 */ "NE", + /* 54 */ "EQ", + /* 55 */ "GT", + /* 56 */ "LE", + /* 57 */ "LT", + /* 58 */ "GE", + /* 59 */ "ESCAPE", + /* 60 */ "ID", + /* 61 */ "COLUMNKW", + /* 62 */ "DO", + /* 63 */ "FOR", + /* 64 */ "IGNORE", + /* 65 */ "INITIALLY", + /* 66 */ "INSTEAD", + /* 67 */ "NO", + /* 68 */ "KEY", + /* 69 */ "OF", + /* 70 */ "OFFSET", + /* 71 */ "PRAGMA", + /* 72 */ "RAISE", + /* 73 */ "RECURSIVE", + /* 74 */ "REPLACE", + /* 75 */ "RESTRICT", + /* 76 */ "ROW", + /* 77 */ "ROWS", + /* 78 */ "TRIGGER", + /* 79 */ "VACUUM", + /* 80 */ "VIEW", + /* 81 */ "VIRTUAL", + /* 82 */ "WITH", + /* 83 */ "NULLS", + /* 84 */ "FIRST", + /* 85 */ "LAST", + /* 86 */ "CURRENT", + /* 87 */ "FOLLOWING", + /* 88 */ "PARTITION", + /* 89 */ "PRECEDING", + /* 90 */ "RANGE", + /* 91 */ "UNBOUNDED", + /* 92 */ "EXCLUDE", + /* 93 */ "GROUPS", + /* 94 */ "OTHERS", + /* 95 */ "TIES", + /* 96 */ "GENERATED", + /* 97 */ "ALWAYS", + /* 98 */ "MATERIALIZED", + /* 99 */ "REINDEX", + /* 100 */ "RENAME", + /* 101 */ "CTIME_KW", + /* 102 */ "ANY", + /* 103 */ "BITAND", + /* 104 */ "BITOR", + /* 105 */ "LSHIFT", + /* 106 */ "RSHIFT", + /* 107 */ "PLUS", + /* 108 */ "MINUS", + /* 109 */ "STAR", + /* 110 */ "SLASH", + /* 111 */ "REM", + /* 112 */ "CONCAT", + /* 113 */ "PTR", + /* 114 */ "COLLATE", + /* 115 */ "BITNOT", + /* 116 */ "ON", + /* 117 */ "INDEXED", + /* 118 */ "STRING", + /* 119 */ "JOIN_KW", + /* 120 */ "CONSTRAINT", + /* 121 */ "DEFAULT", + /* 122 */ "NULL", + /* 123 */ "PRIMARY", + /* 124 */ "UNIQUE", + /* 125 */ "CHECK", + /* 126 */ "REFERENCES", + /* 127 */ "AUTOINCR", + /* 128 */ "INSERT", + /* 129 */ "DELETE", + /* 130 */ "UPDATE", + /* 131 */ "SET", + /* 132 */ "DEFERRABLE", + /* 133 */ "FOREIGN", + /* 134 */ "DROP", + /* 135 */ "UNION", + /* 136 */ "ALL", + /* 137 */ "EXCEPT", + /* 138 */ "INTERSECT", + /* 139 */ "SELECT", + /* 140 */ "VALUES", + /* 141 */ "DISTINCT", + /* 142 */ "DOT", + /* 143 */ "FROM", + /* 144 */ "JOIN", + /* 145 */ "USING", + /* 146 */ "ORDER", + /* 147 */ "GROUP", + /* 148 */ "HAVING", + /* 149 */ "LIMIT", + /* 150 */ "WHERE", + /* 151 */ "RETURNING", + /* 152 */ "INTO", + /* 153 */ "NOTHING", + /* 154 */ "FLOAT", + /* 155 */ 
"BLOB", + /* 156 */ "INTEGER", + /* 157 */ "VARIABLE", + /* 158 */ "CASE", + /* 159 */ "WHEN", + /* 160 */ "THEN", + /* 161 */ "ELSE", + /* 162 */ "INDEX", + /* 163 */ "ALTER", + /* 164 */ "ADD", + /* 165 */ "WINDOW", + /* 166 */ "OVER", + /* 167 */ "FILTER", + /* 168 */ "COLUMN", + /* 169 */ "AGG_FUNCTION", + /* 170 */ "AGG_COLUMN", + /* 171 */ "TRUEFALSE", /* 172 */ "FUNCTION", /* 173 */ "UPLUS", /* 174 */ "UMINUS", @@ -174174,143 +175937,144 @@ static const char *const yyTokenName[] = { /* 182 */ "ERROR", /* 183 */ "QNUMBER", /* 184 */ "SPACE", - /* 185 */ "ILLEGAL", - /* 186 */ "input", - /* 187 */ "cmdlist", - /* 188 */ "ecmd", - /* 189 */ "cmdx", - /* 190 */ "explain", - /* 191 */ "cmd", - /* 192 */ "transtype", - /* 193 */ "trans_opt", - /* 194 */ "nm", - /* 195 */ "savepoint_opt", - /* 196 */ "create_table", - /* 197 */ "create_table_args", - /* 198 */ "createkw", - /* 199 */ "temp", - /* 200 */ "ifnotexists", - /* 201 */ "dbnm", - /* 202 */ "columnlist", - /* 203 */ "conslist_opt", - /* 204 */ "table_option_set", - /* 205 */ "select", - /* 206 */ "table_option", - /* 207 */ "columnname", - /* 208 */ "carglist", - /* 209 */ "typetoken", - /* 210 */ "typename", - /* 211 */ "signed", - /* 212 */ "plus_num", - /* 213 */ "minus_num", - /* 214 */ "scanpt", - /* 215 */ "scantok", - /* 216 */ "ccons", - /* 217 */ "term", - /* 218 */ "expr", - /* 219 */ "onconf", - /* 220 */ "sortorder", - /* 221 */ "autoinc", - /* 222 */ "eidlist_opt", - /* 223 */ "refargs", - /* 224 */ "defer_subclause", - /* 225 */ "generated", - /* 226 */ "refarg", - /* 227 */ "refact", - /* 228 */ "init_deferred_pred_opt", - /* 229 */ "conslist", - /* 230 */ "tconscomma", - /* 231 */ "tcons", - /* 232 */ "sortlist", - /* 233 */ "eidlist", - /* 234 */ "defer_subclause_opt", - /* 235 */ "orconf", - /* 236 */ "resolvetype", - /* 237 */ "raisetype", - /* 238 */ "ifexists", - /* 239 */ "fullname", - /* 240 */ "selectnowith", - /* 241 */ "oneselect", - /* 242 */ "wqlist", - /* 243 */ "multiselect_op", - /* 244 */ "distinct", - /* 245 */ "selcollist", - /* 246 */ "from", - /* 247 */ "where_opt", - /* 248 */ "groupby_opt", - /* 249 */ "having_opt", - /* 250 */ "orderby_opt", - /* 251 */ "limit_opt", - /* 252 */ "window_clause", - /* 253 */ "values", - /* 254 */ "nexprlist", - /* 255 */ "mvalues", - /* 256 */ "sclp", - /* 257 */ "as", - /* 258 */ "seltablist", - /* 259 */ "stl_prefix", - /* 260 */ "joinop", - /* 261 */ "on_using", - /* 262 */ "indexed_by", - /* 263 */ "exprlist", - /* 264 */ "xfullname", - /* 265 */ "idlist", - /* 266 */ "indexed_opt", - /* 267 */ "nulls", - /* 268 */ "with", - /* 269 */ "where_opt_ret", - /* 270 */ "setlist", - /* 271 */ "insert_cmd", - /* 272 */ "idlist_opt", - /* 273 */ "upsert", - /* 274 */ "returning", - /* 275 */ "filter_over", - /* 276 */ "likeop", - /* 277 */ "between_op", - /* 278 */ "in_op", - /* 279 */ "paren_exprlist", - /* 280 */ "case_operand", - /* 281 */ "case_exprlist", - /* 282 */ "case_else", - /* 283 */ "uniqueflag", - /* 284 */ "collate", - /* 285 */ "vinto", - /* 286 */ "nmnum", - /* 287 */ "trigger_decl", - /* 288 */ "trigger_cmd_list", - /* 289 */ "trigger_time", - /* 290 */ "trigger_event", - /* 291 */ "foreach_clause", - /* 292 */ "when_clause", - /* 293 */ "trigger_cmd", - /* 294 */ "trnm", - /* 295 */ "tridxby", - /* 296 */ "database_kw_opt", - /* 297 */ "key_opt", - /* 298 */ "add_column_fullname", - /* 299 */ "kwcolumn_opt", - /* 300 */ "create_vtab", - /* 301 */ "vtabarglist", - /* 302 */ "vtabarg", - /* 303 */ "vtabargtoken", - /* 304 */ "lp", - /* 305 */ 
"anylist", - /* 306 */ "wqitem", - /* 307 */ "wqas", - /* 308 */ "withnm", - /* 309 */ "windowdefn_list", - /* 310 */ "windowdefn", - /* 311 */ "window", - /* 312 */ "frame_opt", - /* 313 */ "part_opt", - /* 314 */ "filter_clause", - /* 315 */ "over_clause", - /* 316 */ "range_or_rows", - /* 317 */ "frame_bound", - /* 318 */ "frame_bound_s", - /* 319 */ "frame_bound_e", - /* 320 */ "frame_exclude_opt", - /* 321 */ "frame_exclude", + /* 185 */ "COMMENT", + /* 186 */ "ILLEGAL", + /* 187 */ "input", + /* 188 */ "cmdlist", + /* 189 */ "ecmd", + /* 190 */ "cmdx", + /* 191 */ "explain", + /* 192 */ "cmd", + /* 193 */ "transtype", + /* 194 */ "trans_opt", + /* 195 */ "nm", + /* 196 */ "savepoint_opt", + /* 197 */ "create_table", + /* 198 */ "create_table_args", + /* 199 */ "createkw", + /* 200 */ "temp", + /* 201 */ "ifnotexists", + /* 202 */ "dbnm", + /* 203 */ "columnlist", + /* 204 */ "conslist_opt", + /* 205 */ "table_option_set", + /* 206 */ "select", + /* 207 */ "table_option", + /* 208 */ "columnname", + /* 209 */ "carglist", + /* 210 */ "typetoken", + /* 211 */ "typename", + /* 212 */ "signed", + /* 213 */ "plus_num", + /* 214 */ "minus_num", + /* 215 */ "scanpt", + /* 216 */ "scantok", + /* 217 */ "ccons", + /* 218 */ "term", + /* 219 */ "expr", + /* 220 */ "onconf", + /* 221 */ "sortorder", + /* 222 */ "autoinc", + /* 223 */ "eidlist_opt", + /* 224 */ "refargs", + /* 225 */ "defer_subclause", + /* 226 */ "generated", + /* 227 */ "refarg", + /* 228 */ "refact", + /* 229 */ "init_deferred_pred_opt", + /* 230 */ "conslist", + /* 231 */ "tconscomma", + /* 232 */ "tcons", + /* 233 */ "sortlist", + /* 234 */ "eidlist", + /* 235 */ "defer_subclause_opt", + /* 236 */ "orconf", + /* 237 */ "resolvetype", + /* 238 */ "raisetype", + /* 239 */ "ifexists", + /* 240 */ "fullname", + /* 241 */ "selectnowith", + /* 242 */ "oneselect", + /* 243 */ "wqlist", + /* 244 */ "multiselect_op", + /* 245 */ "distinct", + /* 246 */ "selcollist", + /* 247 */ "from", + /* 248 */ "where_opt", + /* 249 */ "groupby_opt", + /* 250 */ "having_opt", + /* 251 */ "orderby_opt", + /* 252 */ "limit_opt", + /* 253 */ "window_clause", + /* 254 */ "values", + /* 255 */ "nexprlist", + /* 256 */ "mvalues", + /* 257 */ "sclp", + /* 258 */ "as", + /* 259 */ "seltablist", + /* 260 */ "stl_prefix", + /* 261 */ "joinop", + /* 262 */ "on_using", + /* 263 */ "indexed_by", + /* 264 */ "exprlist", + /* 265 */ "xfullname", + /* 266 */ "idlist", + /* 267 */ "indexed_opt", + /* 268 */ "nulls", + /* 269 */ "with", + /* 270 */ "where_opt_ret", + /* 271 */ "setlist", + /* 272 */ "insert_cmd", + /* 273 */ "idlist_opt", + /* 274 */ "upsert", + /* 275 */ "returning", + /* 276 */ "filter_over", + /* 277 */ "likeop", + /* 278 */ "between_op", + /* 279 */ "in_op", + /* 280 */ "paren_exprlist", + /* 281 */ "case_operand", + /* 282 */ "case_exprlist", + /* 283 */ "case_else", + /* 284 */ "uniqueflag", + /* 285 */ "collate", + /* 286 */ "vinto", + /* 287 */ "nmnum", + /* 288 */ "trigger_decl", + /* 289 */ "trigger_cmd_list", + /* 290 */ "trigger_time", + /* 291 */ "trigger_event", + /* 292 */ "foreach_clause", + /* 293 */ "when_clause", + /* 294 */ "trigger_cmd", + /* 295 */ "trnm", + /* 296 */ "tridxby", + /* 297 */ "database_kw_opt", + /* 298 */ "key_opt", + /* 299 */ "add_column_fullname", + /* 300 */ "kwcolumn_opt", + /* 301 */ "create_vtab", + /* 302 */ "vtabarglist", + /* 303 */ "vtabarg", + /* 304 */ "vtabargtoken", + /* 305 */ "lp", + /* 306 */ "anylist", + /* 307 */ "wqitem", + /* 308 */ "wqas", + /* 309 */ "withnm", + /* 310 */ 
"windowdefn_list", + /* 311 */ "windowdefn", + /* 312 */ "window", + /* 313 */ "frame_opt", + /* 314 */ "part_opt", + /* 315 */ "filter_clause", + /* 316 */ "over_clause", + /* 317 */ "range_or_rows", + /* 318 */ "frame_bound", + /* 319 */ "frame_bound_s", + /* 320 */ "frame_bound_e", + /* 321 */ "frame_exclude_opt", + /* 322 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -174598,7 +176362,7 @@ static const char *const yyRuleName[] = { /* 277 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", /* 278 */ "trigger_cmd ::= scanpt select scanpt", /* 279 */ "expr ::= RAISE LP IGNORE RP", - /* 280 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 280 */ "expr ::= RAISE LP raisetype COMMA expr RP", /* 281 */ "raisetype ::= ROLLBACK", /* 282 */ "raisetype ::= ABORT", /* 283 */ "raisetype ::= FAIL", @@ -174850,98 +176614,98 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 205: /* select */ - case 240: /* selectnowith */ - case 241: /* oneselect */ - case 253: /* values */ - case 255: /* mvalues */ + case 206: /* select */ + case 241: /* selectnowith */ + case 242: /* oneselect */ + case 254: /* values */ + case 256: /* mvalues */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy555)); -} - break; - case 217: /* term */ - case 218: /* expr */ - case 247: /* where_opt */ - case 249: /* having_opt */ - case 269: /* where_opt_ret */ - case 280: /* case_operand */ - case 282: /* case_else */ - case 285: /* vinto */ - case 292: /* when_clause */ - case 297: /* key_opt */ - case 314: /* filter_clause */ +sqlite3SelectDelete(pParse->db, (yypminor->yy637)); +} + break; + case 218: /* term */ + case 219: /* expr */ + case 248: /* where_opt */ + case 250: /* having_opt */ + case 270: /* where_opt_ret */ + case 281: /* case_operand */ + case 283: /* case_else */ + case 286: /* vinto */ + case 293: /* when_clause */ + case 298: /* key_opt */ + case 315: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy454)); -} - break; - case 222: /* eidlist_opt */ - case 232: /* sortlist */ - case 233: /* eidlist */ - case 245: /* selcollist */ - case 248: /* groupby_opt */ - case 250: /* orderby_opt */ - case 254: /* nexprlist */ - case 256: /* sclp */ - case 263: /* exprlist */ - case 270: /* setlist */ - case 279: /* paren_exprlist */ - case 281: /* case_exprlist */ - case 313: /* part_opt */ +sqlite3ExprDelete(pParse->db, (yypminor->yy590)); +} + break; + case 223: /* eidlist_opt */ + case 233: /* sortlist */ + case 234: /* eidlist */ + case 246: /* selcollist */ + case 249: /* groupby_opt */ + case 251: /* orderby_opt */ + case 255: /* nexprlist */ + case 257: /* sclp */ + case 264: /* exprlist */ + case 271: /* setlist */ + case 280: /* paren_exprlist */ + case 282: /* case_exprlist */ + case 314: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy14)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy402)); } break; - case 239: /* fullname */ - case 246: /* from */ - case 258: /* seltablist */ - case 259: /* stl_prefix */ - case 264: /* xfullname */ + case 240: /* fullname */ + case 247: /* from */ + case 259: /* seltablist */ + case 260: /* stl_prefix */ + case 265: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy203)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy563)); } break; - case 242: /* wqlist */ + case 243: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy59)); +sqlite3WithDelete(pParse->db, (yypminor->yy125)); } 
break; - case 252: /* window_clause */ - case 309: /* windowdefn_list */ + case 253: /* window_clause */ + case 310: /* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy211)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy483)); } break; - case 265: /* idlist */ - case 272: /* idlist_opt */ + case 266: /* idlist */ + case 273: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy132)); +sqlite3IdListDelete(pParse->db, (yypminor->yy204)); } break; - case 275: /* filter_over */ - case 310: /* windowdefn */ - case 311: /* window */ - case 312: /* frame_opt */ - case 315: /* over_clause */ + case 276: /* filter_over */ + case 311: /* windowdefn */ + case 312: /* window */ + case 313: /* frame_opt */ + case 316: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy211)); +sqlite3WindowDelete(pParse->db, (yypminor->yy483)); } break; - case 288: /* trigger_cmd_list */ - case 293: /* trigger_cmd */ + case 289: /* trigger_cmd_list */ + case 294: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy427)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy319)); } break; - case 290: /* trigger_event */ + case 291: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy286).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy28).b); } break; - case 317: /* frame_bound */ - case 318: /* frame_bound_s */ - case 319: /* frame_bound_e */ + case 318: /* frame_bound */ + case 319: /* frame_bound_s */ + case 320: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy509).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy205).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -175243,415 +177007,415 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 190, /* (0) explain ::= EXPLAIN */ - 190, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 189, /* (2) cmdx ::= cmd */ - 191, /* (3) cmd ::= BEGIN transtype trans_opt */ - 192, /* (4) transtype ::= */ - 192, /* (5) transtype ::= DEFERRED */ - 192, /* (6) transtype ::= IMMEDIATE */ - 192, /* (7) transtype ::= EXCLUSIVE */ - 191, /* (8) cmd ::= COMMIT|END trans_opt */ - 191, /* (9) cmd ::= ROLLBACK trans_opt */ - 191, /* (10) cmd ::= SAVEPOINT nm */ - 191, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 191, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ - 196, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 198, /* (14) createkw ::= CREATE */ - 200, /* (15) ifnotexists ::= */ - 200, /* (16) ifnotexists ::= IF NOT EXISTS */ - 199, /* (17) temp ::= TEMP */ - 199, /* (18) temp ::= */ - 197, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ - 197, /* (20) create_table_args ::= AS select */ - 204, /* (21) table_option_set ::= */ - 204, /* (22) table_option_set ::= table_option_set COMMA table_option */ - 206, /* (23) table_option ::= WITHOUT nm */ - 206, /* (24) table_option ::= nm */ - 207, /* (25) columnname ::= nm typetoken */ - 209, /* (26) typetoken ::= */ - 209, /* (27) typetoken ::= typename LP signed RP */ - 209, /* (28) typetoken ::= typename LP signed COMMA signed RP */ - 210, /* (29) typename ::= typename ID|STRING */ - 214, /* (30) scanpt ::= */ - 215, /* (31) scantok ::= */ - 216, /* (32) ccons ::= CONSTRAINT nm */ - 216, /* (33) ccons ::= DEFAULT scantok term */ - 216, /* (34) ccons ::= DEFAULT LP expr RP */ - 216, /* (35) ccons ::= DEFAULT PLUS 
scantok term */ - 216, /* (36) ccons ::= DEFAULT MINUS scantok term */ - 216, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ - 216, /* (38) ccons ::= NOT NULL onconf */ - 216, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 216, /* (40) ccons ::= UNIQUE onconf */ - 216, /* (41) ccons ::= CHECK LP expr RP */ - 216, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ - 216, /* (43) ccons ::= defer_subclause */ - 216, /* (44) ccons ::= COLLATE ID|STRING */ - 225, /* (45) generated ::= LP expr RP */ - 225, /* (46) generated ::= LP expr RP ID */ - 221, /* (47) autoinc ::= */ - 221, /* (48) autoinc ::= AUTOINCR */ - 223, /* (49) refargs ::= */ - 223, /* (50) refargs ::= refargs refarg */ - 226, /* (51) refarg ::= MATCH nm */ - 226, /* (52) refarg ::= ON INSERT refact */ - 226, /* (53) refarg ::= ON DELETE refact */ - 226, /* (54) refarg ::= ON UPDATE refact */ - 227, /* (55) refact ::= SET NULL */ - 227, /* (56) refact ::= SET DEFAULT */ - 227, /* (57) refact ::= CASCADE */ - 227, /* (58) refact ::= RESTRICT */ - 227, /* (59) refact ::= NO ACTION */ - 224, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 224, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 228, /* (62) init_deferred_pred_opt ::= */ - 228, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 228, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 203, /* (65) conslist_opt ::= */ - 230, /* (66) tconscomma ::= COMMA */ - 231, /* (67) tcons ::= CONSTRAINT nm */ - 231, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 231, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ - 231, /* (70) tcons ::= CHECK LP expr RP onconf */ - 231, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 234, /* (72) defer_subclause_opt ::= */ - 219, /* (73) onconf ::= */ - 219, /* (74) onconf ::= ON CONFLICT resolvetype */ - 235, /* (75) orconf ::= */ - 235, /* (76) orconf ::= OR resolvetype */ - 236, /* (77) resolvetype ::= IGNORE */ - 236, /* (78) resolvetype ::= REPLACE */ - 191, /* (79) cmd ::= DROP TABLE ifexists fullname */ - 238, /* (80) ifexists ::= IF EXISTS */ - 238, /* (81) ifexists ::= */ - 191, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 191, /* (83) cmd ::= DROP VIEW ifexists fullname */ - 191, /* (84) cmd ::= select */ - 205, /* (85) select ::= WITH wqlist selectnowith */ - 205, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ - 205, /* (87) select ::= selectnowith */ - 240, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ - 243, /* (89) multiselect_op ::= UNION */ - 243, /* (90) multiselect_op ::= UNION ALL */ - 243, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ - 241, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 241, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 253, /* (94) values ::= VALUES LP nexprlist RP */ - 241, /* (95) oneselect ::= mvalues */ - 255, /* (96) mvalues ::= values COMMA LP nexprlist RP */ - 255, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ - 244, /* (98) distinct ::= DISTINCT */ - 244, /* (99) distinct ::= ALL */ - 244, /* (100) distinct ::= */ - 256, /* (101) sclp ::= */ - 245, /* (102) selcollist ::= sclp scanpt expr scanpt as */ - 245, /* (103) selcollist ::= sclp scanpt STAR */ - 245, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ - 257, /* (105) as ::= AS 
nm */ - 257, /* (106) as ::= */ - 246, /* (107) from ::= */ - 246, /* (108) from ::= FROM seltablist */ - 259, /* (109) stl_prefix ::= seltablist joinop */ - 259, /* (110) stl_prefix ::= */ - 258, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ - 258, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - 258, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - 258, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ - 258, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 201, /* (116) dbnm ::= */ - 201, /* (117) dbnm ::= DOT nm */ - 239, /* (118) fullname ::= nm */ - 239, /* (119) fullname ::= nm DOT nm */ - 264, /* (120) xfullname ::= nm */ - 264, /* (121) xfullname ::= nm DOT nm */ - 264, /* (122) xfullname ::= nm DOT nm AS nm */ - 264, /* (123) xfullname ::= nm AS nm */ - 260, /* (124) joinop ::= COMMA|JOIN */ - 260, /* (125) joinop ::= JOIN_KW JOIN */ - 260, /* (126) joinop ::= JOIN_KW nm JOIN */ - 260, /* (127) joinop ::= JOIN_KW nm nm JOIN */ - 261, /* (128) on_using ::= ON expr */ - 261, /* (129) on_using ::= USING LP idlist RP */ - 261, /* (130) on_using ::= */ - 266, /* (131) indexed_opt ::= */ - 262, /* (132) indexed_by ::= INDEXED BY nm */ - 262, /* (133) indexed_by ::= NOT INDEXED */ - 250, /* (134) orderby_opt ::= */ - 250, /* (135) orderby_opt ::= ORDER BY sortlist */ - 232, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ - 232, /* (137) sortlist ::= expr sortorder nulls */ - 220, /* (138) sortorder ::= ASC */ - 220, /* (139) sortorder ::= DESC */ - 220, /* (140) sortorder ::= */ - 267, /* (141) nulls ::= NULLS FIRST */ - 267, /* (142) nulls ::= NULLS LAST */ - 267, /* (143) nulls ::= */ - 248, /* (144) groupby_opt ::= */ - 248, /* (145) groupby_opt ::= GROUP BY nexprlist */ - 249, /* (146) having_opt ::= */ - 249, /* (147) having_opt ::= HAVING expr */ - 251, /* (148) limit_opt ::= */ - 251, /* (149) limit_opt ::= LIMIT expr */ - 251, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ - 251, /* (151) limit_opt ::= LIMIT expr COMMA expr */ - 191, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ - 247, /* (153) where_opt ::= */ - 247, /* (154) where_opt ::= WHERE expr */ - 269, /* (155) where_opt_ret ::= */ - 269, /* (156) where_opt_ret ::= WHERE expr */ - 269, /* (157) where_opt_ret ::= RETURNING selcollist */ - 269, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ - 191, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ - 270, /* (160) setlist ::= setlist COMMA nm EQ expr */ - 270, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 270, /* (162) setlist ::= nm EQ expr */ - 270, /* (163) setlist ::= LP idlist RP EQ expr */ - 191, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 191, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 273, /* (166) upsert ::= */ - 273, /* (167) upsert ::= RETURNING selcollist */ - 273, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - 273, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - 273, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ - 273, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - 274, /* (172) returning ::= RETURNING selcollist */ - 271, /* (173) insert_cmd ::= INSERT orconf */ - 271, /* (174) insert_cmd ::= REPLACE */ - 272, /* (175) idlist_opt ::= */ - 272, /* (176) 
idlist_opt ::= LP idlist RP */ - 265, /* (177) idlist ::= idlist COMMA nm */ - 265, /* (178) idlist ::= nm */ - 218, /* (179) expr ::= LP expr RP */ - 218, /* (180) expr ::= ID|INDEXED|JOIN_KW */ - 218, /* (181) expr ::= nm DOT nm */ - 218, /* (182) expr ::= nm DOT nm DOT nm */ - 217, /* (183) term ::= NULL|FLOAT|BLOB */ - 217, /* (184) term ::= STRING */ - 217, /* (185) term ::= INTEGER */ - 218, /* (186) expr ::= VARIABLE */ - 218, /* (187) expr ::= expr COLLATE ID|STRING */ - 218, /* (188) expr ::= CAST LP expr AS typetoken RP */ - 218, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - 218, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - 218, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - 218, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - 218, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - 218, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - 217, /* (195) term ::= CTIME_KW */ - 218, /* (196) expr ::= LP nexprlist COMMA expr RP */ - 218, /* (197) expr ::= expr AND expr */ - 218, /* (198) expr ::= expr OR expr */ - 218, /* (199) expr ::= expr LT|GT|GE|LE expr */ - 218, /* (200) expr ::= expr EQ|NE expr */ - 218, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 218, /* (202) expr ::= expr PLUS|MINUS expr */ - 218, /* (203) expr ::= expr STAR|SLASH|REM expr */ - 218, /* (204) expr ::= expr CONCAT expr */ - 276, /* (205) likeop ::= NOT LIKE_KW|MATCH */ - 218, /* (206) expr ::= expr likeop expr */ - 218, /* (207) expr ::= expr likeop expr ESCAPE expr */ - 218, /* (208) expr ::= expr ISNULL|NOTNULL */ - 218, /* (209) expr ::= expr NOT NULL */ - 218, /* (210) expr ::= expr IS expr */ - 218, /* (211) expr ::= expr IS NOT expr */ - 218, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ - 218, /* (213) expr ::= expr IS DISTINCT FROM expr */ - 218, /* (214) expr ::= NOT expr */ - 218, /* (215) expr ::= BITNOT expr */ - 218, /* (216) expr ::= PLUS|MINUS expr */ - 218, /* (217) expr ::= expr PTR expr */ - 277, /* (218) between_op ::= BETWEEN */ - 277, /* (219) between_op ::= NOT BETWEEN */ - 218, /* (220) expr ::= expr between_op expr AND expr */ - 278, /* (221) in_op ::= IN */ - 278, /* (222) in_op ::= NOT IN */ - 218, /* (223) expr ::= expr in_op LP exprlist RP */ - 218, /* (224) expr ::= LP select RP */ - 218, /* (225) expr ::= expr in_op LP select RP */ - 218, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ - 218, /* (227) expr ::= EXISTS LP select RP */ - 218, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ - 281, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 281, /* (230) case_exprlist ::= WHEN expr THEN expr */ - 282, /* (231) case_else ::= ELSE expr */ - 282, /* (232) case_else ::= */ - 280, /* (233) case_operand ::= */ - 263, /* (234) exprlist ::= */ - 254, /* (235) nexprlist ::= nexprlist COMMA expr */ - 254, /* (236) nexprlist ::= expr */ - 279, /* (237) paren_exprlist ::= */ - 279, /* (238) paren_exprlist ::= LP exprlist RP */ - 191, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 283, /* (240) uniqueflag ::= UNIQUE */ - 283, /* (241) uniqueflag ::= */ - 222, /* (242) eidlist_opt ::= */ - 222, /* (243) eidlist_opt ::= LP eidlist RP */ - 233, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ - 233, /* (245) eidlist ::= nm collate sortorder */ - 284, /* (246) collate ::= */ - 284, /* (247) collate ::= COLLATE ID|STRING 
*/ - 191, /* (248) cmd ::= DROP INDEX ifexists fullname */ - 191, /* (249) cmd ::= VACUUM vinto */ - 191, /* (250) cmd ::= VACUUM nm vinto */ - 285, /* (251) vinto ::= INTO expr */ - 285, /* (252) vinto ::= */ - 191, /* (253) cmd ::= PRAGMA nm dbnm */ - 191, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 191, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 191, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 191, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 212, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ - 213, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ - 191, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 287, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 289, /* (262) trigger_time ::= BEFORE|AFTER */ - 289, /* (263) trigger_time ::= INSTEAD OF */ - 289, /* (264) trigger_time ::= */ - 290, /* (265) trigger_event ::= DELETE|INSERT */ - 290, /* (266) trigger_event ::= UPDATE */ - 290, /* (267) trigger_event ::= UPDATE OF idlist */ - 292, /* (268) when_clause ::= */ - 292, /* (269) when_clause ::= WHEN expr */ - 288, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 288, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ - 294, /* (272) trnm ::= nm DOT nm */ - 295, /* (273) tridxby ::= INDEXED BY nm */ - 295, /* (274) tridxby ::= NOT INDEXED */ - 293, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 293, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 293, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 293, /* (278) trigger_cmd ::= scanpt select scanpt */ - 218, /* (279) expr ::= RAISE LP IGNORE RP */ - 218, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ - 237, /* (281) raisetype ::= ROLLBACK */ - 237, /* (282) raisetype ::= ABORT */ - 237, /* (283) raisetype ::= FAIL */ - 191, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ - 191, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 191, /* (286) cmd ::= DETACH database_kw_opt expr */ - 297, /* (287) key_opt ::= */ - 297, /* (288) key_opt ::= KEY expr */ - 191, /* (289) cmd ::= REINDEX */ - 191, /* (290) cmd ::= REINDEX nm dbnm */ - 191, /* (291) cmd ::= ANALYZE */ - 191, /* (292) cmd ::= ANALYZE nm dbnm */ - 191, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 191, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 191, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - 298, /* (296) add_column_fullname ::= fullname */ - 191, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 191, /* (298) cmd ::= create_vtab */ - 191, /* (299) cmd ::= create_vtab LP vtabarglist RP */ - 300, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 302, /* (301) vtabarg ::= */ - 303, /* (302) vtabargtoken ::= ANY */ - 303, /* (303) vtabargtoken ::= lp anylist RP */ - 304, /* (304) lp ::= LP */ - 268, /* (305) with ::= WITH wqlist */ - 268, /* (306) with ::= WITH RECURSIVE wqlist */ - 307, /* (307) wqas ::= AS */ - 307, /* (308) wqas ::= AS MATERIALIZED */ - 307, /* (309) wqas ::= AS NOT MATERIALIZED */ - 306, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ - 308, /* (311) withnm ::= nm */ - 242, /* (312) wqlist ::= wqitem */ - 242, /* (313) wqlist ::= wqlist COMMA wqitem */ - 309, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 310, /* (315) windowdefn ::= 
nm AS LP window RP */ - 311, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 311, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 311, /* (318) window ::= ORDER BY sortlist frame_opt */ - 311, /* (319) window ::= nm ORDER BY sortlist frame_opt */ - 311, /* (320) window ::= nm frame_opt */ - 312, /* (321) frame_opt ::= */ - 312, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 312, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 316, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ - 318, /* (325) frame_bound_s ::= frame_bound */ - 318, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ - 319, /* (327) frame_bound_e ::= frame_bound */ - 319, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 317, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ - 317, /* (330) frame_bound ::= CURRENT ROW */ - 320, /* (331) frame_exclude_opt ::= */ - 320, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 321, /* (333) frame_exclude ::= NO OTHERS */ - 321, /* (334) frame_exclude ::= CURRENT ROW */ - 321, /* (335) frame_exclude ::= GROUP|TIES */ - 252, /* (336) window_clause ::= WINDOW windowdefn_list */ - 275, /* (337) filter_over ::= filter_clause over_clause */ - 275, /* (338) filter_over ::= over_clause */ - 275, /* (339) filter_over ::= filter_clause */ - 315, /* (340) over_clause ::= OVER LP window RP */ - 315, /* (341) over_clause ::= OVER nm */ - 314, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ - 217, /* (343) term ::= QNUMBER */ - 186, /* (344) input ::= cmdlist */ - 187, /* (345) cmdlist ::= cmdlist ecmd */ - 187, /* (346) cmdlist ::= ecmd */ - 188, /* (347) ecmd ::= SEMI */ - 188, /* (348) ecmd ::= cmdx SEMI */ - 188, /* (349) ecmd ::= explain cmdx SEMI */ - 193, /* (350) trans_opt ::= */ - 193, /* (351) trans_opt ::= TRANSACTION */ - 193, /* (352) trans_opt ::= TRANSACTION nm */ - 195, /* (353) savepoint_opt ::= SAVEPOINT */ - 195, /* (354) savepoint_opt ::= */ - 191, /* (355) cmd ::= create_table create_table_args */ - 204, /* (356) table_option_set ::= table_option */ - 202, /* (357) columnlist ::= columnlist COMMA columnname carglist */ - 202, /* (358) columnlist ::= columnname carglist */ - 194, /* (359) nm ::= ID|INDEXED|JOIN_KW */ - 194, /* (360) nm ::= STRING */ - 209, /* (361) typetoken ::= typename */ - 210, /* (362) typename ::= ID|STRING */ - 211, /* (363) signed ::= plus_num */ - 211, /* (364) signed ::= minus_num */ - 208, /* (365) carglist ::= carglist ccons */ - 208, /* (366) carglist ::= */ - 216, /* (367) ccons ::= NULL onconf */ - 216, /* (368) ccons ::= GENERATED ALWAYS AS generated */ - 216, /* (369) ccons ::= AS generated */ - 203, /* (370) conslist_opt ::= COMMA conslist */ - 229, /* (371) conslist ::= conslist tconscomma tcons */ - 229, /* (372) conslist ::= tcons */ - 230, /* (373) tconscomma ::= */ - 234, /* (374) defer_subclause_opt ::= defer_subclause */ - 236, /* (375) resolvetype ::= raisetype */ - 240, /* (376) selectnowith ::= oneselect */ - 241, /* (377) oneselect ::= values */ - 256, /* (378) sclp ::= selcollist COMMA */ - 257, /* (379) as ::= ID|STRING */ - 266, /* (380) indexed_opt ::= indexed_by */ - 274, /* (381) returning ::= */ - 218, /* (382) expr ::= term */ - 276, /* (383) likeop ::= LIKE_KW|MATCH */ - 280, /* (384) case_operand ::= expr */ - 263, /* (385) exprlist ::= nexprlist */ - 286, /* (386) nmnum ::= plus_num */ - 286, /* (387) nmnum ::= nm */ - 286, /* (388) nmnum ::= ON */ - 286, /* (389) nmnum ::= DELETE 
*/ - 286, /* (390) nmnum ::= DEFAULT */ - 212, /* (391) plus_num ::= INTEGER|FLOAT */ - 291, /* (392) foreach_clause ::= */ - 291, /* (393) foreach_clause ::= FOR EACH ROW */ - 294, /* (394) trnm ::= nm */ - 295, /* (395) tridxby ::= */ - 296, /* (396) database_kw_opt ::= DATABASE */ - 296, /* (397) database_kw_opt ::= */ - 299, /* (398) kwcolumn_opt ::= */ - 299, /* (399) kwcolumn_opt ::= COLUMNKW */ - 301, /* (400) vtabarglist ::= vtabarg */ - 301, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ - 302, /* (402) vtabarg ::= vtabarg vtabargtoken */ - 305, /* (403) anylist ::= */ - 305, /* (404) anylist ::= anylist LP anylist RP */ - 305, /* (405) anylist ::= anylist ANY */ - 268, /* (406) with ::= */ - 309, /* (407) windowdefn_list ::= windowdefn */ - 311, /* (408) window ::= frame_opt */ + 191, /* (0) explain ::= EXPLAIN */ + 191, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 190, /* (2) cmdx ::= cmd */ + 192, /* (3) cmd ::= BEGIN transtype trans_opt */ + 193, /* (4) transtype ::= */ + 193, /* (5) transtype ::= DEFERRED */ + 193, /* (6) transtype ::= IMMEDIATE */ + 193, /* (7) transtype ::= EXCLUSIVE */ + 192, /* (8) cmd ::= COMMIT|END trans_opt */ + 192, /* (9) cmd ::= ROLLBACK trans_opt */ + 192, /* (10) cmd ::= SAVEPOINT nm */ + 192, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 192, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 197, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 199, /* (14) createkw ::= CREATE */ + 201, /* (15) ifnotexists ::= */ + 201, /* (16) ifnotexists ::= IF NOT EXISTS */ + 200, /* (17) temp ::= TEMP */ + 200, /* (18) temp ::= */ + 198, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ + 198, /* (20) create_table_args ::= AS select */ + 205, /* (21) table_option_set ::= */ + 205, /* (22) table_option_set ::= table_option_set COMMA table_option */ + 207, /* (23) table_option ::= WITHOUT nm */ + 207, /* (24) table_option ::= nm */ + 208, /* (25) columnname ::= nm typetoken */ + 210, /* (26) typetoken ::= */ + 210, /* (27) typetoken ::= typename LP signed RP */ + 210, /* (28) typetoken ::= typename LP signed COMMA signed RP */ + 211, /* (29) typename ::= typename ID|STRING */ + 215, /* (30) scanpt ::= */ + 216, /* (31) scantok ::= */ + 217, /* (32) ccons ::= CONSTRAINT nm */ + 217, /* (33) ccons ::= DEFAULT scantok term */ + 217, /* (34) ccons ::= DEFAULT LP expr RP */ + 217, /* (35) ccons ::= DEFAULT PLUS scantok term */ + 217, /* (36) ccons ::= DEFAULT MINUS scantok term */ + 217, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ + 217, /* (38) ccons ::= NOT NULL onconf */ + 217, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 217, /* (40) ccons ::= UNIQUE onconf */ + 217, /* (41) ccons ::= CHECK LP expr RP */ + 217, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ + 217, /* (43) ccons ::= defer_subclause */ + 217, /* (44) ccons ::= COLLATE ID|STRING */ + 226, /* (45) generated ::= LP expr RP */ + 226, /* (46) generated ::= LP expr RP ID */ + 222, /* (47) autoinc ::= */ + 222, /* (48) autoinc ::= AUTOINCR */ + 224, /* (49) refargs ::= */ + 224, /* (50) refargs ::= refargs refarg */ + 227, /* (51) refarg ::= MATCH nm */ + 227, /* (52) refarg ::= ON INSERT refact */ + 227, /* (53) refarg ::= ON DELETE refact */ + 227, /* (54) refarg ::= ON UPDATE refact */ + 228, /* (55) refact ::= SET NULL */ + 228, /* (56) refact ::= SET DEFAULT */ + 228, /* (57) refact ::= CASCADE */ + 228, /* (58) refact ::= RESTRICT */ + 228, /* (59) refact ::= NO ACTION */ + 225, /* (60) defer_subclause ::= 
NOT DEFERRABLE init_deferred_pred_opt */ + 225, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 229, /* (62) init_deferred_pred_opt ::= */ + 229, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 229, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 204, /* (65) conslist_opt ::= */ + 231, /* (66) tconscomma ::= COMMA */ + 232, /* (67) tcons ::= CONSTRAINT nm */ + 232, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 232, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ + 232, /* (70) tcons ::= CHECK LP expr RP onconf */ + 232, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 235, /* (72) defer_subclause_opt ::= */ + 220, /* (73) onconf ::= */ + 220, /* (74) onconf ::= ON CONFLICT resolvetype */ + 236, /* (75) orconf ::= */ + 236, /* (76) orconf ::= OR resolvetype */ + 237, /* (77) resolvetype ::= IGNORE */ + 237, /* (78) resolvetype ::= REPLACE */ + 192, /* (79) cmd ::= DROP TABLE ifexists fullname */ + 239, /* (80) ifexists ::= IF EXISTS */ + 239, /* (81) ifexists ::= */ + 192, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 192, /* (83) cmd ::= DROP VIEW ifexists fullname */ + 192, /* (84) cmd ::= select */ + 206, /* (85) select ::= WITH wqlist selectnowith */ + 206, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ + 206, /* (87) select ::= selectnowith */ + 241, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ + 244, /* (89) multiselect_op ::= UNION */ + 244, /* (90) multiselect_op ::= UNION ALL */ + 244, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ + 242, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 242, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 254, /* (94) values ::= VALUES LP nexprlist RP */ + 242, /* (95) oneselect ::= mvalues */ + 256, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + 256, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + 245, /* (98) distinct ::= DISTINCT */ + 245, /* (99) distinct ::= ALL */ + 245, /* (100) distinct ::= */ + 257, /* (101) sclp ::= */ + 246, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + 246, /* (103) selcollist ::= sclp scanpt STAR */ + 246, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + 258, /* (105) as ::= AS nm */ + 258, /* (106) as ::= */ + 247, /* (107) from ::= */ + 247, /* (108) from ::= FROM seltablist */ + 260, /* (109) stl_prefix ::= seltablist joinop */ + 260, /* (110) stl_prefix ::= */ + 259, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + 259, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + 259, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + 259, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + 259, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 202, /* (116) dbnm ::= */ + 202, /* (117) dbnm ::= DOT nm */ + 240, /* (118) fullname ::= nm */ + 240, /* (119) fullname ::= nm DOT nm */ + 265, /* (120) xfullname ::= nm */ + 265, /* (121) xfullname ::= nm DOT nm */ + 265, /* (122) xfullname ::= nm DOT nm AS nm */ + 265, /* (123) xfullname ::= nm AS nm */ + 261, /* (124) joinop ::= COMMA|JOIN */ + 261, /* (125) joinop ::= JOIN_KW JOIN */ + 261, /* (126) joinop ::= JOIN_KW nm JOIN */ + 261, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + 262, /* (128) on_using ::= ON expr */ + 262, /* (129) on_using ::= 
USING LP idlist RP */ + 262, /* (130) on_using ::= */ + 267, /* (131) indexed_opt ::= */ + 263, /* (132) indexed_by ::= INDEXED BY nm */ + 263, /* (133) indexed_by ::= NOT INDEXED */ + 251, /* (134) orderby_opt ::= */ + 251, /* (135) orderby_opt ::= ORDER BY sortlist */ + 233, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + 233, /* (137) sortlist ::= expr sortorder nulls */ + 221, /* (138) sortorder ::= ASC */ + 221, /* (139) sortorder ::= DESC */ + 221, /* (140) sortorder ::= */ + 268, /* (141) nulls ::= NULLS FIRST */ + 268, /* (142) nulls ::= NULLS LAST */ + 268, /* (143) nulls ::= */ + 249, /* (144) groupby_opt ::= */ + 249, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 250, /* (146) having_opt ::= */ + 250, /* (147) having_opt ::= HAVING expr */ + 252, /* (148) limit_opt ::= */ + 252, /* (149) limit_opt ::= LIMIT expr */ + 252, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + 252, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + 192, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ + 248, /* (153) where_opt ::= */ + 248, /* (154) where_opt ::= WHERE expr */ + 270, /* (155) where_opt_ret ::= */ + 270, /* (156) where_opt_ret ::= WHERE expr */ + 270, /* (157) where_opt_ret ::= RETURNING selcollist */ + 270, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 192, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + 271, /* (160) setlist ::= setlist COMMA nm EQ expr */ + 271, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 271, /* (162) setlist ::= nm EQ expr */ + 271, /* (163) setlist ::= LP idlist RP EQ expr */ + 192, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 192, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 274, /* (166) upsert ::= */ + 274, /* (167) upsert ::= RETURNING selcollist */ + 274, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + 274, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 274, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + 274, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 275, /* (172) returning ::= RETURNING selcollist */ + 272, /* (173) insert_cmd ::= INSERT orconf */ + 272, /* (174) insert_cmd ::= REPLACE */ + 273, /* (175) idlist_opt ::= */ + 273, /* (176) idlist_opt ::= LP idlist RP */ + 266, /* (177) idlist ::= idlist COMMA nm */ + 266, /* (178) idlist ::= nm */ + 219, /* (179) expr ::= LP expr RP */ + 219, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + 219, /* (181) expr ::= nm DOT nm */ + 219, /* (182) expr ::= nm DOT nm DOT nm */ + 218, /* (183) term ::= NULL|FLOAT|BLOB */ + 218, /* (184) term ::= STRING */ + 218, /* (185) term ::= INTEGER */ + 219, /* (186) expr ::= VARIABLE */ + 219, /* (187) expr ::= expr COLLATE ID|STRING */ + 219, /* (188) expr ::= CAST LP expr AS typetoken RP */ + 219, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 219, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + 219, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 219, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 219, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + 219, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 218, /* (195) term ::= CTIME_KW */ + 219, /* (196) expr ::= LP nexprlist COMMA expr RP */ + 219, /* 
(197) expr ::= expr AND expr */ + 219, /* (198) expr ::= expr OR expr */ + 219, /* (199) expr ::= expr LT|GT|GE|LE expr */ + 219, /* (200) expr ::= expr EQ|NE expr */ + 219, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 219, /* (202) expr ::= expr PLUS|MINUS expr */ + 219, /* (203) expr ::= expr STAR|SLASH|REM expr */ + 219, /* (204) expr ::= expr CONCAT expr */ + 277, /* (205) likeop ::= NOT LIKE_KW|MATCH */ + 219, /* (206) expr ::= expr likeop expr */ + 219, /* (207) expr ::= expr likeop expr ESCAPE expr */ + 219, /* (208) expr ::= expr ISNULL|NOTNULL */ + 219, /* (209) expr ::= expr NOT NULL */ + 219, /* (210) expr ::= expr IS expr */ + 219, /* (211) expr ::= expr IS NOT expr */ + 219, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + 219, /* (213) expr ::= expr IS DISTINCT FROM expr */ + 219, /* (214) expr ::= NOT expr */ + 219, /* (215) expr ::= BITNOT expr */ + 219, /* (216) expr ::= PLUS|MINUS expr */ + 219, /* (217) expr ::= expr PTR expr */ + 278, /* (218) between_op ::= BETWEEN */ + 278, /* (219) between_op ::= NOT BETWEEN */ + 219, /* (220) expr ::= expr between_op expr AND expr */ + 279, /* (221) in_op ::= IN */ + 279, /* (222) in_op ::= NOT IN */ + 219, /* (223) expr ::= expr in_op LP exprlist RP */ + 219, /* (224) expr ::= LP select RP */ + 219, /* (225) expr ::= expr in_op LP select RP */ + 219, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + 219, /* (227) expr ::= EXISTS LP select RP */ + 219, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ + 282, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 282, /* (230) case_exprlist ::= WHEN expr THEN expr */ + 283, /* (231) case_else ::= ELSE expr */ + 283, /* (232) case_else ::= */ + 281, /* (233) case_operand ::= */ + 264, /* (234) exprlist ::= */ + 255, /* (235) nexprlist ::= nexprlist COMMA expr */ + 255, /* (236) nexprlist ::= expr */ + 280, /* (237) paren_exprlist ::= */ + 280, /* (238) paren_exprlist ::= LP exprlist RP */ + 192, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 284, /* (240) uniqueflag ::= UNIQUE */ + 284, /* (241) uniqueflag ::= */ + 223, /* (242) eidlist_opt ::= */ + 223, /* (243) eidlist_opt ::= LP eidlist RP */ + 234, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + 234, /* (245) eidlist ::= nm collate sortorder */ + 285, /* (246) collate ::= */ + 285, /* (247) collate ::= COLLATE ID|STRING */ + 192, /* (248) cmd ::= DROP INDEX ifexists fullname */ + 192, /* (249) cmd ::= VACUUM vinto */ + 192, /* (250) cmd ::= VACUUM nm vinto */ + 286, /* (251) vinto ::= INTO expr */ + 286, /* (252) vinto ::= */ + 192, /* (253) cmd ::= PRAGMA nm dbnm */ + 192, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 192, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 192, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 192, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 213, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + 214, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + 192, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 288, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 290, /* (262) trigger_time ::= BEFORE|AFTER */ + 290, /* (263) trigger_time ::= INSTEAD OF */ + 290, /* (264) trigger_time ::= */ + 291, /* (265) trigger_event ::= DELETE|INSERT */ + 291, /* (266) trigger_event ::= UPDATE */ + 291, /* (267) trigger_event ::= UPDATE OF idlist */ + 293, /* (268) when_clause ::= */ + 293, /* 
(269) when_clause ::= WHEN expr */ + 289, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 289, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + 295, /* (272) trnm ::= nm DOT nm */ + 296, /* (273) tridxby ::= INDEXED BY nm */ + 296, /* (274) tridxby ::= NOT INDEXED */ + 294, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 294, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 294, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 294, /* (278) trigger_cmd ::= scanpt select scanpt */ + 219, /* (279) expr ::= RAISE LP IGNORE RP */ + 219, /* (280) expr ::= RAISE LP raisetype COMMA expr RP */ + 238, /* (281) raisetype ::= ROLLBACK */ + 238, /* (282) raisetype ::= ABORT */ + 238, /* (283) raisetype ::= FAIL */ + 192, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ + 192, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 192, /* (286) cmd ::= DETACH database_kw_opt expr */ + 298, /* (287) key_opt ::= */ + 298, /* (288) key_opt ::= KEY expr */ + 192, /* (289) cmd ::= REINDEX */ + 192, /* (290) cmd ::= REINDEX nm dbnm */ + 192, /* (291) cmd ::= ANALYZE */ + 192, /* (292) cmd ::= ANALYZE nm dbnm */ + 192, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 192, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 192, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 299, /* (296) add_column_fullname ::= fullname */ + 192, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 192, /* (298) cmd ::= create_vtab */ + 192, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + 301, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 303, /* (301) vtabarg ::= */ + 304, /* (302) vtabargtoken ::= ANY */ + 304, /* (303) vtabargtoken ::= lp anylist RP */ + 305, /* (304) lp ::= LP */ + 269, /* (305) with ::= WITH wqlist */ + 269, /* (306) with ::= WITH RECURSIVE wqlist */ + 308, /* (307) wqas ::= AS */ + 308, /* (308) wqas ::= AS MATERIALIZED */ + 308, /* (309) wqas ::= AS NOT MATERIALIZED */ + 307, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + 309, /* (311) withnm ::= nm */ + 243, /* (312) wqlist ::= wqitem */ + 243, /* (313) wqlist ::= wqlist COMMA wqitem */ + 310, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 311, /* (315) windowdefn ::= nm AS LP window RP */ + 312, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 312, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 312, /* (318) window ::= ORDER BY sortlist frame_opt */ + 312, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + 312, /* (320) window ::= nm frame_opt */ + 313, /* (321) frame_opt ::= */ + 313, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 313, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 317, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + 319, /* (325) frame_bound_s ::= frame_bound */ + 319, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + 320, /* (327) frame_bound_e ::= frame_bound */ + 320, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 318, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + 318, /* (330) frame_bound ::= CURRENT ROW */ + 321, /* (331) frame_exclude_opt ::= */ + 321, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 322, /* (333) frame_exclude ::= NO OTHERS */ + 322, /* (334) frame_exclude 
::= CURRENT ROW */ + 322, /* (335) frame_exclude ::= GROUP|TIES */ + 253, /* (336) window_clause ::= WINDOW windowdefn_list */ + 276, /* (337) filter_over ::= filter_clause over_clause */ + 276, /* (338) filter_over ::= over_clause */ + 276, /* (339) filter_over ::= filter_clause */ + 316, /* (340) over_clause ::= OVER LP window RP */ + 316, /* (341) over_clause ::= OVER nm */ + 315, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + 218, /* (343) term ::= QNUMBER */ + 187, /* (344) input ::= cmdlist */ + 188, /* (345) cmdlist ::= cmdlist ecmd */ + 188, /* (346) cmdlist ::= ecmd */ + 189, /* (347) ecmd ::= SEMI */ + 189, /* (348) ecmd ::= cmdx SEMI */ + 189, /* (349) ecmd ::= explain cmdx SEMI */ + 194, /* (350) trans_opt ::= */ + 194, /* (351) trans_opt ::= TRANSACTION */ + 194, /* (352) trans_opt ::= TRANSACTION nm */ + 196, /* (353) savepoint_opt ::= SAVEPOINT */ + 196, /* (354) savepoint_opt ::= */ + 192, /* (355) cmd ::= create_table create_table_args */ + 205, /* (356) table_option_set ::= table_option */ + 203, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + 203, /* (358) columnlist ::= columnname carglist */ + 195, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + 195, /* (360) nm ::= STRING */ + 210, /* (361) typetoken ::= typename */ + 211, /* (362) typename ::= ID|STRING */ + 212, /* (363) signed ::= plus_num */ + 212, /* (364) signed ::= minus_num */ + 209, /* (365) carglist ::= carglist ccons */ + 209, /* (366) carglist ::= */ + 217, /* (367) ccons ::= NULL onconf */ + 217, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + 217, /* (369) ccons ::= AS generated */ + 204, /* (370) conslist_opt ::= COMMA conslist */ + 230, /* (371) conslist ::= conslist tconscomma tcons */ + 230, /* (372) conslist ::= tcons */ + 231, /* (373) tconscomma ::= */ + 235, /* (374) defer_subclause_opt ::= defer_subclause */ + 237, /* (375) resolvetype ::= raisetype */ + 241, /* (376) selectnowith ::= oneselect */ + 242, /* (377) oneselect ::= values */ + 257, /* (378) sclp ::= selcollist COMMA */ + 258, /* (379) as ::= ID|STRING */ + 267, /* (380) indexed_opt ::= indexed_by */ + 275, /* (381) returning ::= */ + 219, /* (382) expr ::= term */ + 277, /* (383) likeop ::= LIKE_KW|MATCH */ + 281, /* (384) case_operand ::= expr */ + 264, /* (385) exprlist ::= nexprlist */ + 287, /* (386) nmnum ::= plus_num */ + 287, /* (387) nmnum ::= nm */ + 287, /* (388) nmnum ::= ON */ + 287, /* (389) nmnum ::= DELETE */ + 287, /* (390) nmnum ::= DEFAULT */ + 213, /* (391) plus_num ::= INTEGER|FLOAT */ + 292, /* (392) foreach_clause ::= */ + 292, /* (393) foreach_clause ::= FOR EACH ROW */ + 295, /* (394) trnm ::= nm */ + 296, /* (395) tridxby ::= */ + 297, /* (396) database_kw_opt ::= DATABASE */ + 297, /* (397) database_kw_opt ::= */ + 300, /* (398) kwcolumn_opt ::= */ + 300, /* (399) kwcolumn_opt ::= COLUMNKW */ + 302, /* (400) vtabarglist ::= vtabarg */ + 302, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + 303, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 306, /* (403) anylist ::= */ + 306, /* (404) anylist ::= anylist LP anylist RP */ + 306, /* (405) anylist ::= anylist ANY */ + 269, /* (406) with ::= */ + 310, /* (407) windowdefn_list ::= windowdefn */ + 312, /* (408) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -175937,7 +177701,7 @@ static const signed char yyRuleInfoNRhs[] = { -6, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -3, /* (278) trigger_cmd ::= scanpt select scanpt */ -4, /* (279) expr ::= RAISE LP 
IGNORE RP */ - -6, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + -6, /* (280) expr ::= RAISE LP raisetype COMMA expr RP */ -1, /* (281) raisetype ::= ROLLBACK */ -1, /* (282) raisetype ::= ABORT */ -1, /* (283) raisetype ::= FAIL */ @@ -176117,16 +177881,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy144);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy502);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy144 = TK_DEFERRED;} +{yymsp[1].minor.yy502 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); case 324: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==324); -{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/} +{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -176149,7 +177913,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy144,0,0,yymsp[-2].minor.yy144); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy502,0,0,yymsp[-2].minor.yy502); } break; case 14: /* createkw ::= CREATE */ @@ -176163,38 +177927,38 @@ static YYACTIONTYPE yy_reduce( case 81: /* ifexists ::= */ yytestcase(yyruleno==81); case 100: /* distinct ::= */ yytestcase(yyruleno==100); case 246: /* collate ::= */ yytestcase(yyruleno==246); -{yymsp[1].minor.yy144 = 0;} +{yymsp[1].minor.yy502 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy144 = 1;} +{yymsp[-2].minor.yy502 = 1;} break; case 17: /* temp ::= TEMP */ -{yymsp[0].minor.yy144 = pParse->db->init.busy==0;} +{yymsp[0].minor.yy502 = pParse->db->init.busy==0;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_option_set */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy391,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy9,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy555); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy637); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy637); } break; case 21: /* table_option_set ::= */ -{yymsp[1].minor.yy391 = 0;} +{yymsp[1].minor.yy9 = 0;} break; case 22: /* table_option_set ::= table_option_set COMMA table_option */ -{yylhsminor.yy391 = yymsp[-2].minor.yy391|yymsp[0].minor.yy391;} - yymsp[-2].minor.yy391 = yylhsminor.yy391; +{yylhsminor.yy9 = yymsp[-2].minor.yy9|yymsp[0].minor.yy9;} + yymsp[-2].minor.yy9 = yylhsminor.yy9; break; case 23: /* table_option ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy391 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy9 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy391 = 0; + yymsp[-1].minor.yy9 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -176202,13 +177966,13 @@ static YYACTIONTYPE yy_reduce( case 24: /* table_option ::= nm */ { if( yymsp[0].minor.yy0.n==6 
&& sqlite3_strnicmp(yymsp[0].minor.yy0.z,"strict",6)==0 ){ - yylhsminor.yy391 = TF_Strict; + yylhsminor.yy9 = TF_Strict; }else{ - yylhsminor.yy391 = 0; + yylhsminor.yy9 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } - yymsp[0].minor.yy391 = yylhsminor.yy391; + yymsp[0].minor.yy9 = yylhsminor.yy9; break; case 25: /* columnname ::= nm typetoken */ {sqlite3AddColumn(pParse,yymsp[-1].minor.yy0,yymsp[0].minor.yy0);} @@ -176234,7 +177998,7 @@ static YYACTIONTYPE yy_reduce( case 30: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy168 = yyLookaheadToken.z; + yymsp[1].minor.yy342 = yyLookaheadToken.z; } break; case 31: /* scantok ::= */ @@ -176248,17 +178012,17 @@ static YYACTIONTYPE yy_reduce( {pParse->constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy590,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 35: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 36: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy454, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy590, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -176273,151 +178037,155 @@ static YYACTIONTYPE yy_reduce( } break; case 38: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy144);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy502);} break; case 39: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy144,yymsp[0].minor.yy144,yymsp[-2].minor.yy144);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy502,yymsp[0].minor.yy502,yymsp[-2].minor.yy502);} break; case 40: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy144,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy502,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 41: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy590,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 42: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy144);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy402,yymsp[0].minor.yy502);} break; case 43: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy144);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy502);} break; case 44: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 45: /* generated ::= LP expr RP */ 
-{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy454,0);} +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy590,0);} break; case 46: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy454,&yymsp[0].minor.yy0);} +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy590,&yymsp[0].minor.yy0);} break; case 48: /* autoinc ::= AUTOINCR */ -{yymsp[0].minor.yy144 = 1;} +{yymsp[0].minor.yy502 = 1;} break; case 49: /* refargs ::= */ -{ yymsp[1].minor.yy144 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy502 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 50: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy144 = (yymsp[-1].minor.yy144 & ~yymsp[0].minor.yy383.mask) | yymsp[0].minor.yy383.value; } +{ yymsp[-1].minor.yy502 = (yymsp[-1].minor.yy502 & ~yymsp[0].minor.yy481.mask) | yymsp[0].minor.yy481.value; } break; case 51: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy383.value = 0; yymsp[-1].minor.yy383.mask = 0x000000; } +{ yymsp[-1].minor.yy481.value = 0; yymsp[-1].minor.yy481.mask = 0x000000; } break; case 52: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy383.value = 0; yymsp[-2].minor.yy383.mask = 0x000000; } +{ yymsp[-2].minor.yy481.value = 0; yymsp[-2].minor.yy481.mask = 0x000000; } break; case 53: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144; yymsp[-2].minor.yy383.mask = 0x0000ff; } +{ yymsp[-2].minor.yy481.value = yymsp[0].minor.yy502; yymsp[-2].minor.yy481.mask = 0x0000ff; } break; case 54: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144<<8; yymsp[-2].minor.yy383.mask = 0x00ff00; } +{ yymsp[-2].minor.yy481.value = yymsp[0].minor.yy502<<8; yymsp[-2].minor.yy481.mask = 0x00ff00; } break; case 55: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy144 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_SetNull; /* EV: R-33326-45252 */} break; case 56: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy144 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 57: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy144 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy502 = OE_Cascade; /* EV: R-33326-45252 */} break; case 58: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy144 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy502 = OE_Restrict; /* EV: R-33326-45252 */} break; case 59: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy144 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy502 = OE_None; /* EV: R-33326-45252 */} break; case 60: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy144 = 0;} +{yymsp[-2].minor.yy502 = 0;} break; case 61: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 76: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==76); case 173: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==173); -{yymsp[-1].minor.yy144 = yymsp[0].minor.yy144;} +{yymsp[-1].minor.yy502 = yymsp[0].minor.yy502;} break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); case 222: /* in_op ::= NOT IN */ yytestcase(yyruleno==222); case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); -{yymsp[-1].minor.yy144 = 1;} +{yymsp[-1].minor.yy502 = 1;} break; case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy144 = 0;} 
+{yymsp[-1].minor.yy502 = 0;} break; case 66: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy144,yymsp[-2].minor.yy144,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy402,yymsp[0].minor.yy502,yymsp[-2].minor.yy502,0);} break; case 69: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy144,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy402,yymsp[0].minor.yy502,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 70: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy454,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy590,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 71: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy144); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy144); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy402, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy402, yymsp[-1].minor.yy502); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy502); } break; case 73: /* onconf ::= */ case 75: /* orconf ::= */ yytestcase(yyruleno==75); -{yymsp[1].minor.yy144 = OE_Default;} +{yymsp[1].minor.yy502 = OE_Default;} break; case 74: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy144 = yymsp[0].minor.yy144;} +{yymsp[-2].minor.yy502 = yymsp[0].minor.yy502;} break; case 77: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy144 = OE_Ignore;} +{yymsp[0].minor.yy502 = OE_Ignore;} break; case 78: /* resolvetype ::= REPLACE */ case 174: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==174); -{yymsp[0].minor.yy144 = OE_Replace;} +{yymsp[0].minor.yy502 = OE_Replace;} break; case 79: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy203, 0, yymsp[-1].minor.yy144); + sqlite3DropTable(pParse, yymsp[0].minor.yy563, 0, yymsp[-1].minor.yy502); } break; case 82: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[0].minor.yy555, yymsp[-7].minor.yy144, yymsp[-5].minor.yy144); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy402, yymsp[0].minor.yy637, yymsp[-7].minor.yy502, yymsp[-5].minor.yy502); } break; case 83: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy203, 1, yymsp[-1].minor.yy144); + sqlite3DropTable(pParse, yymsp[0].minor.yy563, 1, yymsp[-1].minor.yy502); } break; case 84: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy555, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); + if( (pParse->db->mDbFlags & DBFLAG_EncodingFixed)!=0 + || sqlite3ReadSchema(pParse)==SQLITE_OK + ){ + sqlite3Select(pParse, yymsp[0].minor.yy637, &dest); + } + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy637); } break; case 85: /* select ::= WITH wqlist selectnowith */ -{yymsp[-2].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} +{yymsp[-2].minor.yy637 = 
attachWithToSelect(pParse,yymsp[0].minor.yy637,yymsp[-1].minor.yy125);} break; case 86: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{yymsp[-3].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} +{yymsp[-3].minor.yy637 = attachWithToSelect(pParse,yymsp[0].minor.yy637,yymsp[-1].minor.yy125);} break; case 87: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy555; + Select *p = yymsp[0].minor.yy637; if( p ){ parserDoubleLinkSelect(pParse, p); } @@ -176425,8 +178193,8 @@ static YYACTIONTYPE yy_reduce( break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy555; - Select *pLhs = yymsp[-2].minor.yy555; + Select *pRhs = yymsp[0].minor.yy637; + Select *pLhs = yymsp[-2].minor.yy637; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -176436,60 +178204,60 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy144; + pRhs->op = (u8)yymsp[-1].minor.yy502; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy144!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy502!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy555 = pRhs; + yymsp[-2].minor.yy637 = pRhs; } break; case 89: /* multiselect_op ::= UNION */ case 91: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==91); -{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-OP*/} break; case 90: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy144 = TK_ALL;} +{yymsp[-1].minor.yy502 = TK_ALL;} break; case 92: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy203,yymsp[-4].minor.yy454,yymsp[-3].minor.yy14,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[-7].minor.yy144,yymsp[0].minor.yy454); + yymsp[-8].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy402,yymsp[-5].minor.yy563,yymsp[-4].minor.yy590,yymsp[-3].minor.yy402,yymsp[-2].minor.yy590,yymsp[-1].minor.yy402,yymsp[-7].minor.yy502,yymsp[0].minor.yy590); } break; case 93: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy14,yymsp[-6].minor.yy203,yymsp[-5].minor.yy454,yymsp[-4].minor.yy14,yymsp[-3].minor.yy454,yymsp[-1].minor.yy14,yymsp[-8].minor.yy144,yymsp[0].minor.yy454); - if( yymsp[-9].minor.yy555 ){ - yymsp[-9].minor.yy555->pWinDefn = yymsp[-2].minor.yy211; + yymsp[-9].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy402,yymsp[-6].minor.yy563,yymsp[-5].minor.yy590,yymsp[-4].minor.yy402,yymsp[-3].minor.yy590,yymsp[-1].minor.yy402,yymsp[-8].minor.yy502,yymsp[0].minor.yy590); + if( yymsp[-9].minor.yy637 ){ + yymsp[-9].minor.yy637->pWinDefn = yymsp[-2].minor.yy483; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy211); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy483); } } break; case 94: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy637 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy402,0,0,0,0,0,SF_Values,0); } break; case 95: /* oneselect ::= mvalues */ { - 
sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy555); + sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy637); } break; case 96: /* mvalues ::= values COMMA LP nexprlist RP */ case 97: /* mvalues ::= mvalues COMMA LP nexprlist RP */ yytestcase(yyruleno==97); { - yymsp[-4].minor.yy555 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy555, yymsp[-1].minor.yy14); + yymsp[-4].minor.yy637 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy637, yymsp[-1].minor.yy402); } break; case 98: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy144 = SF_Distinct;} +{yymsp[0].minor.yy502 = SF_Distinct;} break; case 99: /* distinct ::= ALL */ -{yymsp[0].minor.yy144 = SF_All;} +{yymsp[0].minor.yy502 = SF_All;} break; case 101: /* sclp ::= */ case 134: /* orderby_opt ::= */ yytestcase(yyruleno==134); @@ -176497,20 +178265,20 @@ static YYACTIONTYPE yy_reduce( case 234: /* exprlist ::= */ yytestcase(yyruleno==234); case 237: /* paren_exprlist ::= */ yytestcase(yyruleno==237); case 242: /* eidlist_opt ::= */ yytestcase(yyruleno==242); -{yymsp[1].minor.yy14 = 0;} +{yymsp[1].minor.yy402 = 0;} break; case 102: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy14,yymsp[-3].minor.yy168,yymsp[-1].minor.yy168); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy402, yymsp[-2].minor.yy590); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy402, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy402,yymsp[-3].minor.yy342,yymsp[-1].minor.yy342); } break; case 103: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); - yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, p); + yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy402, p); } break; case 104: /* selcollist ::= sclp scanpt nm DOT STAR */ @@ -176520,7 +178288,7 @@ static YYACTIONTYPE yy_reduce( sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, pDot); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, pDot); } break; case 105: /* as ::= AS nm */ @@ -176531,55 +178299,65 @@ static YYACTIONTYPE yy_reduce( break; case 107: /* from ::= */ case 110: /* stl_prefix ::= */ yytestcase(yyruleno==110); -{yymsp[1].minor.yy203 = 0;} +{yymsp[1].minor.yy563 = 0;} break; case 108: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy203 = yymsp[0].minor.yy203; - sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy203); + yymsp[-1].minor.yy563 = yymsp[0].minor.yy563; + sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy563); } break; case 109: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy203 && yymsp[-1].minor.yy203->nSrc>0) ) yymsp[-1].minor.yy203->a[yymsp[-1].minor.yy203->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy144; + if( ALWAYS(yymsp[-1].minor.yy563 && yymsp[-1].minor.yy563->nSrc>0) ) yymsp[-1].minor.yy563->a[yymsp[-1].minor.yy563->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy502; } break; case 111: /* seltablist ::= stl_prefix nm dbnm as 
on_using */ { - yymsp[-4].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy203,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); + yymsp[-4].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy563,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); } break; case 112: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ { - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy269); - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-1].minor.yy0); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy421); + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy563, &yymsp[-1].minor.yy0); } break; case 113: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ { - yymsp[-7].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy203,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); - sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy203, yymsp[-3].minor.yy14); + yymsp[-7].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy563,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); + sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy563, yymsp[-3].minor.yy402); } break; case 114: /* seltablist ::= stl_prefix LP select RP as on_using */ { - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy555,&yymsp[0].minor.yy269); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy637,&yymsp[0].minor.yy421); } break; case 115: /* seltablist ::= stl_prefix LP seltablist RP as on_using */ { - if( yymsp[-5].minor.yy203==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy269.pOn==0 && yymsp[0].minor.yy269.pUsing==0 ){ - yymsp[-5].minor.yy203 = yymsp[-3].minor.yy203; - }else if( ALWAYS(yymsp[-3].minor.yy203!=0) && yymsp[-3].minor.yy203->nSrc==1 ){ - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); - if( yymsp[-5].minor.yy203 ){ - SrcItem *pNew = &yymsp[-5].minor.yy203->a[yymsp[-5].minor.yy203->nSrc-1]; - SrcItem *pOld = yymsp[-3].minor.yy203->a; + if( yymsp[-5].minor.yy563==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy421.pOn==0 && yymsp[0].minor.yy421.pUsing==0 ){ + yymsp[-5].minor.yy563 = yymsp[-3].minor.yy563; + }else if( ALWAYS(yymsp[-3].minor.yy563!=0) && yymsp[-3].minor.yy563->nSrc==1 ){ + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy421); + if( yymsp[-5].minor.yy563 ){ + SrcItem *pNew = &yymsp[-5].minor.yy563->a[yymsp[-5].minor.yy563->nSrc-1]; + SrcItem *pOld = yymsp[-3].minor.yy563->a; + assert( pOld->fg.fixedSchema==0 ); pNew->zName = pOld->zName; - pNew->zDatabase = pOld->zDatabase; - pNew->pSelect = pOld->pSelect; - if( pNew->pSelect && (pNew->pSelect->selFlags & SF_NestedFrom)!=0 ){ - pNew->fg.isNestedFrom = 1; + assert( pOld->fg.fixedSchema==0 ); + if( pOld->fg.isSubquery ){ + pNew->fg.isSubquery = 1; + pNew->u4.pSubq = pOld->u4.pSubq; + pOld->u4.pSubq = 0; + pOld->fg.isSubquery = 0; + assert( pNew->u4.pSubq!=0 && 
pNew->u4.pSubq->pSelect!=0 ); + if( (pNew->u4.pSubq->pSelect->selFlags & SF_NestedFrom)!=0 ){ + pNew->fg.isNestedFrom = 1; + } + }else{ + pNew->u4.zDatabase = pOld->u4.zDatabase; + pOld->u4.zDatabase = 0; } if( pOld->fg.isTabFunc ){ pNew->u1.pFuncArg = pOld->u1.pFuncArg; @@ -176587,15 +178365,14 @@ static YYACTIONTYPE yy_reduce( pOld->fg.isTabFunc = 0; pNew->fg.isTabFunc = 1; } - pOld->zName = pOld->zDatabase = 0; - pOld->pSelect = 0; + pOld->zName = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy203); + sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy563); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy203); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy203,0,0,0,0,SF_NestedFrom,0); - yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy269); + sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy563); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy563,0,0,0,0,SF_NestedFrom,0); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy563,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy421); } } break; @@ -176605,56 +178382,56 @@ static YYACTIONTYPE yy_reduce( break; case 118: /* fullname ::= nm */ { - yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy563 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy563->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy203 = yylhsminor.yy203; + yymsp[0].minor.yy563 = yylhsminor.yy563; break; case 119: /* fullname ::= nm DOT nm */ { - yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy563 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy563->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy203 = yylhsminor.yy203; + yymsp[-2].minor.yy563 = yylhsminor.yy563; break; case 120: /* xfullname ::= nm */ -{yymsp[0].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} +{yymsp[0].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; case 121: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[-2].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 122: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy203 ) yymsp[-4].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy563 ) yymsp[-4].minor.yy563->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 123: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy203 = 
sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy203 ) yymsp[-2].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy563 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy563 ) yymsp[-2].minor.yy563->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 124: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy144 = JT_INNER; } +{ yymsp[0].minor.yy502 = JT_INNER; } break; case 125: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} +{yymsp[-1].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; case 126: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} +{yymsp[-2].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; case 127: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} +{yymsp[-3].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; case 128: /* on_using ::= ON expr */ -{yymsp[-1].minor.yy269.pOn = yymsp[0].minor.yy454; yymsp[-1].minor.yy269.pUsing = 0;} +{yymsp[-1].minor.yy421.pOn = yymsp[0].minor.yy590; yymsp[-1].minor.yy421.pUsing = 0;} break; case 129: /* on_using ::= USING LP idlist RP */ -{yymsp[-3].minor.yy269.pOn = 0; yymsp[-3].minor.yy269.pUsing = yymsp[-1].minor.yy132;} +{yymsp[-3].minor.yy421.pOn = 0; yymsp[-3].minor.yy421.pUsing = yymsp[-1].minor.yy204;} break; case 130: /* on_using ::= */ -{yymsp[1].minor.yy269.pOn = 0; yymsp[1].minor.yy269.pUsing = 0;} +{yymsp[1].minor.yy421.pOn = 0; yymsp[1].minor.yy421.pUsing = 0;} break; case 132: /* indexed_by ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} @@ -176664,35 +178441,35 @@ static YYACTIONTYPE yy_reduce( break; case 135: /* orderby_opt ::= ORDER BY sortlist */ case 145: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==145); -{yymsp[-2].minor.yy14 = yymsp[0].minor.yy14;} +{yymsp[-2].minor.yy402 = yymsp[0].minor.yy402;} break; case 136: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402,yymsp[-2].minor.yy590); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy402,yymsp[-1].minor.yy502,yymsp[0].minor.yy502); } break; case 137: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy454); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); + yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy590); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy402,yymsp[-1].minor.yy502,yymsp[0].minor.yy502); } break; case 138: /* sortorder ::= ASC */ -{yymsp[0].minor.yy144 = SQLITE_SO_ASC;} +{yymsp[0].minor.yy502 = SQLITE_SO_ASC;} break; case 139: /* sortorder ::= DESC */ -{yymsp[0].minor.yy144 = SQLITE_SO_DESC;} +{yymsp[0].minor.yy502 = SQLITE_SO_DESC;} 
break; case 140: /* sortorder ::= */ case 143: /* nulls ::= */ yytestcase(yyruleno==143); -{yymsp[1].minor.yy144 = SQLITE_SO_UNDEFINED;} +{yymsp[1].minor.yy502 = SQLITE_SO_UNDEFINED;} break; case 141: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy144 = SQLITE_SO_ASC;} +{yymsp[-1].minor.yy502 = SQLITE_SO_ASC;} break; case 142: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy144 = SQLITE_SO_DESC;} +{yymsp[-1].minor.yy502 = SQLITE_SO_DESC;} break; case 146: /* having_opt ::= */ case 148: /* limit_opt ::= */ yytestcase(yyruleno==148); @@ -176701,42 +178478,42 @@ static YYACTIONTYPE yy_reduce( case 232: /* case_else ::= */ yytestcase(yyruleno==232); case 233: /* case_operand ::= */ yytestcase(yyruleno==233); case 252: /* vinto ::= */ yytestcase(yyruleno==252); -{yymsp[1].minor.yy454 = 0;} +{yymsp[1].minor.yy590 = 0;} break; case 147: /* having_opt ::= HAVING expr */ case 154: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==154); case 156: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==156); case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231); case 251: /* vinto ::= INTO expr */ yytestcase(yyruleno==251); -{yymsp[-1].minor.yy454 = yymsp[0].minor.yy454;} +{yymsp[-1].minor.yy590 = yymsp[0].minor.yy590;} break; case 149: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,0);} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy590,0);} break; case 150: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 151: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,yymsp[-2].minor.yy454);} +{yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy590,yymsp[-2].minor.yy590);} break; case 152: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy203, &yymsp[-1].minor.yy0); - sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy203,yymsp[0].minor.yy454,0,0); + sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy563, &yymsp[-1].minor.yy0); + sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy563,yymsp[0].minor.yy590,0,0); } break; case 157: /* where_opt_ret ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-1].minor.yy454 = 0;} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402); yymsp[-1].minor.yy590 = 0;} break; case 158: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-3].minor.yy454 = yymsp[-2].minor.yy454;} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402); yymsp[-3].minor.yy590 = yymsp[-2].minor.yy590;} break; case 159: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-4].minor.yy0); - sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy14,"set list"); - if( yymsp[-1].minor.yy203 ){ - SrcList *pFromClause = yymsp[-1].minor.yy203; + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy563, &yymsp[-4].minor.yy0); + sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy402,"set list"); + if( yymsp[-1].minor.yy563 ){ + SrcList *pFromClause = yymsp[-1].minor.yy563; if( pFromClause->nSrc>1 ){ Select *pSubquery; Token as; @@ -176745,90 +178522,90 @@ static YYACTIONTYPE yy_reduce( as.z = 
0; pFromClause = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0); } - yymsp[-5].minor.yy203 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy203, pFromClause); + yymsp[-5].minor.yy563 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy563, pFromClause); } - sqlite3Update(pParse,yymsp[-5].minor.yy203,yymsp[-2].minor.yy14,yymsp[0].minor.yy454,yymsp[-6].minor.yy144,0,0,0); + sqlite3Update(pParse,yymsp[-5].minor.yy563,yymsp[-2].minor.yy402,yymsp[0].minor.yy590,yymsp[-6].minor.yy502,0,0,0); } break; case 160: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy454); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy402, yymsp[0].minor.yy590); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy402, &yymsp[-2].minor.yy0, 1); } break; case 161: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy14 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy14, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); + yymsp[-6].minor.yy402 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy402, yymsp[-3].minor.yy204, yymsp[0].minor.yy590); } break; case 162: /* setlist ::= nm EQ expr */ { - yylhsminor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy454); - sqlite3ExprListSetName(pParse, yylhsminor.yy14, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy402 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy590); + sqlite3ExprListSetName(pParse, yylhsminor.yy402, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy14 = yylhsminor.yy14; + yymsp[-2].minor.yy402 = yylhsminor.yy402; break; case 163: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); + yymsp[-4].minor.yy402 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy204, yymsp[0].minor.yy590); } break; case 164: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy203, yymsp[-1].minor.yy555, yymsp[-2].minor.yy132, yymsp[-5].minor.yy144, yymsp[0].minor.yy122); + sqlite3Insert(pParse, yymsp[-3].minor.yy563, yymsp[-1].minor.yy637, yymsp[-2].minor.yy204, yymsp[-5].minor.yy502, yymsp[0].minor.yy403); } break; case 165: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-4].minor.yy203, 0, yymsp[-3].minor.yy132, yymsp[-6].minor.yy144, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy563, 0, yymsp[-3].minor.yy204, yymsp[-6].minor.yy502, 0); } break; case 166: /* upsert ::= */ -{ yymsp[1].minor.yy122 = 0; } +{ yymsp[1].minor.yy403 = 0; } break; case 167: /* upsert ::= RETURNING selcollist */ -{ yymsp[-1].minor.yy122 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy14); } +{ yymsp[-1].minor.yy403 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy402); } break; case 168: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ -{ yymsp[-11].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy14,yymsp[-6].minor.yy454,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,yymsp[0].minor.yy122);} +{ yymsp[-11].minor.yy403 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy402,yymsp[-6].minor.yy590,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590,yymsp[0].minor.yy403);} break; case 169: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ -{ yymsp[-8].minor.yy122 = 
sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy14,yymsp[-3].minor.yy454,0,0,yymsp[0].minor.yy122); } +{ yymsp[-8].minor.yy403 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy402,yymsp[-3].minor.yy590,0,0,yymsp[0].minor.yy403); } break; case 170: /* upsert ::= ON CONFLICT DO NOTHING returning */ -{ yymsp[-4].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } +{ yymsp[-4].minor.yy403 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; case 171: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ -{ yymsp[-7].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,0);} +{ yymsp[-7].minor.yy403 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590,0);} break; case 172: /* returning ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy14);} +{sqlite3AddReturning(pParse,yymsp[0].minor.yy402);} break; case 175: /* idlist_opt ::= */ -{yymsp[1].minor.yy132 = 0;} +{yymsp[1].minor.yy204 = 0;} break; case 176: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy132 = yymsp[-1].minor.yy132;} +{yymsp[-2].minor.yy204 = yymsp[-1].minor.yy204;} break; case 177: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy132 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy132,&yymsp[0].minor.yy0);} +{yymsp[-2].minor.yy204 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy204,&yymsp[0].minor.yy0);} break; case 178: /* idlist ::= nm */ -{yymsp[0].minor.yy132 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} +{yymsp[0].minor.yy204 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; case 179: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy454 = yymsp[-1].minor.yy454;} +{yymsp[-2].minor.yy590 = yymsp[-1].minor.yy590;} break; case 180: /* expr ::= ID|INDEXED|JOIN_KW */ -{yymsp[0].minor.yy454=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[0].minor.yy590=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 181: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); - yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + yylhsminor.yy590 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy454 = yylhsminor.yy454; + yymsp[-2].minor.yy590 = yylhsminor.yy590; break; case 182: /* expr ::= nm DOT nm DOT nm */ { @@ -176839,27 +178616,27 @@ static YYACTIONTYPE yy_reduce( if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, 0, temp1); } - yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy590 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 183: /* term ::= NULL|FLOAT|BLOB */ case 184: /* term ::= STRING */ yytestcase(yyruleno==184); -{yymsp[0].minor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[0].minor.yy590=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 185: /* term ::= INTEGER */ { - yylhsminor.yy454 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); - if( yylhsminor.yy454 ) yylhsminor.yy454->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); + yylhsminor.yy590 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + if( yylhsminor.yy590 ) yylhsminor.yy590->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = 
yylhsminor.yy590; break; case 186: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy454 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy454, n); + yymsp[0].minor.yy590 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy590, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... These terms refer to registers @@ -176867,81 +178644,81 @@ static YYACTIONTYPE yy_reduce( Token t = yymsp[0].minor.yy0; /*A-overwrites-X*/ assert( t.n>=2 ); if( pParse->nested==0 ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy454 = 0; + parserSyntaxError(pParse, &t); + yymsp[0].minor.yy590 = 0; }else{ - yymsp[0].minor.yy454 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy454 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy454->iTable); + yymsp[0].minor.yy590 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy590 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy590->iTable); } } } break; case 187: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy454 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy454, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy590 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy590, &yymsp[0].minor.yy0, 1); } break; case 188: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy454, yymsp[-3].minor.yy454, 0); + yymsp[-5].minor.yy590 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy590, yymsp[-3].minor.yy590, 0); } break; case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy144); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy402, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy502); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy14, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy144); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-1].minor.yy14); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy402, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy502); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy590, yymsp[-1].minor.yy402); } - yymsp[-7].minor.yy454 = yylhsminor.yy454; + yymsp[-7].minor.yy590 = yylhsminor.yy590; break; case 191: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy454 = yylhsminor.yy454; + yymsp[-3].minor.yy590 = yylhsminor.yy590; break; case 192: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy14, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy144); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 
yymsp[-2].minor.yy402, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy502); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); } - yymsp[-5].minor.yy454 = yylhsminor.yy454; + yymsp[-5].minor.yy590 = yylhsminor.yy590; break; case 193: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy14, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy144); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-2].minor.yy14); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy402, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy502); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy590, yymsp[-2].minor.yy402); } - yymsp[-8].minor.yy454 = yylhsminor.yy454; + yymsp[-8].minor.yy590 = yylhsminor.yy590; break; case 194: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy590, yymsp[0].minor.yy483); } - yymsp[-4].minor.yy454 = yylhsminor.yy454; + yymsp[-4].minor.yy590 = yylhsminor.yy590; break; case 195: /* term ::= CTIME_KW */ { - yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = yylhsminor.yy590; break; case 196: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy402, yymsp[-1].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy454->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy590->flags |= pList->a[0].pExpr->flags & EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); @@ -176949,7 +178726,7 @@ static YYACTIONTYPE yy_reduce( } break; case 197: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy454=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy590=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 198: /* expr ::= expr OR expr */ case 199: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==199); @@ -176958,7 +178735,7 @@ static YYACTIONTYPE yy_reduce( case 202: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==202); case 203: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==203); case 204: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==204); -{yymsp[-2].minor.yy454=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy590=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy590,yymsp[0].minor.yy590);} break; case 205: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} 
@@ -176968,11 +178745,11 @@ static YYACTIONTYPE yy_reduce( ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy454); - yymsp[-2].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy454, 0); - if( yymsp[-2].minor.yy454 ) yymsp[-2].minor.yy454->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy590); + yymsp[-2].minor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy590, 0); + if( yymsp[-2].minor.yy590 ) yymsp[-2].minor.yy590->flags |= EP_InfixFunc; } break; case 207: /* expr ::= expr likeop expr ESCAPE expr */ @@ -176980,91 +178757,91 @@ static YYACTIONTYPE yy_reduce( ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ) yymsp[-4].minor.yy454->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ) yymsp[-4].minor.yy590->flags |= EP_InfixFunc; } break; case 208: /* expr ::= expr ISNULL|NOTNULL */ -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy454,0);} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy590,0);} break; case 209: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy454,0);} +{yymsp[-2].minor.yy590 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy590,0);} break; case 210: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-2].minor.yy454, TK_ISNULL); + yymsp[-2].minor.yy590 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-2].minor.yy590, TK_ISNULL); } break; case 211: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-3].minor.yy454, TK_NOTNULL); + yymsp[-3].minor.yy590 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-3].minor.yy590, TK_NOTNULL); } break; case 212: /* expr ::= expr IS NOT DISTINCT FROM expr */ { - yymsp[-5].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy454,yymsp[0].minor.yy454); - 
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-5].minor.yy454, TK_ISNULL); + yymsp[-5].minor.yy590 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-5].minor.yy590, TK_ISNULL); } break; case 213: /* expr ::= expr IS DISTINCT FROM expr */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy454,yymsp[0].minor.yy454); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-4].minor.yy454, TK_NOTNULL); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy590,yymsp[0].minor.yy590); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy590, yymsp[-4].minor.yy590, TK_NOTNULL); } break; case 214: /* expr ::= NOT expr */ case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215); -{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy454, 0);/*A-overwrites-B*/} +{yymsp[-1].minor.yy590 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy590, 0);/*A-overwrites-B*/} break; case 216: /* expr ::= PLUS|MINUS expr */ { - Expr *p = yymsp[0].minor.yy454; + Expr *p = yymsp[0].minor.yy590; u8 op = yymsp[-1].major + (TK_UPLUS-TK_PLUS); assert( TK_UPLUS>TK_PLUS ); assert( TK_UMINUS == TK_MINUS + (TK_UPLUS - TK_PLUS) ); if( p && p->op==TK_UPLUS ){ p->op = op; - yymsp[-1].minor.yy454 = p; + yymsp[-1].minor.yy590 = p; }else{ - yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, op, p, 0); + yymsp[-1].minor.yy590 = sqlite3PExpr(pParse, op, p, 0); /*A-overwrites-B*/ } } break; case 217: /* expr ::= expr PTR expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy454); - yylhsminor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy590); + yylhsminor.yy590 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); } - yymsp[-2].minor.yy454 = yylhsminor.yy454; + yymsp[-2].minor.yy590 = yylhsminor.yy590; break; case 218: /* between_op ::= BETWEEN */ case 221: /* in_op ::= IN */ yytestcase(yyruleno==221); -{yymsp[0].minor.yy144 = 0;} +{yymsp[0].minor.yy502 = 0;} break; case 220: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 223: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy14==0 ){ + if( yymsp[-1].minor.yy402==0 ){ /* Expressions of the form ** ** expr1 IN () @@ -177073,110 +178850,110 @@ static YYACTIONTYPE yy_reduce( ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless of the value of 
expr1. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy454); - yymsp[-4].minor.yy454 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy144 ? "true" : "false"); - if( yymsp[-4].minor.yy454 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy454); - }else{ - Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr; - if( yymsp[-1].minor.yy14->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy454->op!=TK_VECTOR ){ - yymsp[-1].minor.yy14->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy590); + yymsp[-4].minor.yy590 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy502 ? "true" : "false"); + if( yymsp[-4].minor.yy590 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy590); + }else{ + Expr *pRHS = yymsp[-1].minor.yy402->a[0].pExpr; + if( yymsp[-1].minor.yy402->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy590->op!=TK_VECTOR ){ + yymsp[-1].minor.yy402->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy454, pRHS); - }else if( yymsp[-1].minor.yy14->nExpr==1 && pRHS->op==TK_SELECT ){ - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pRHS->x.pSelect); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy590, pRHS); + }else if( yymsp[-1].minor.yy402->nExpr==1 && pRHS->op==TK_SELECT ){ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pRHS->x.pSelect); pRHS->x.pSelect = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - }else{ - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - if( yymsp[-4].minor.yy454==0 ){ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - }else if( yymsp[-4].minor.yy454->pLeft->op==TK_VECTOR ){ - int nExpr = yymsp[-4].minor.yy454->pLeft->x.pList->nExpr; - Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy14); + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); + }else{ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + if( yymsp[-4].minor.yy590==0 ){ + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy402); + }else if( yymsp[-4].minor.yy590->pLeft->op==TK_VECTOR ){ + int nExpr = yymsp[-4].minor.yy590->pLeft->x.pList->nExpr; + Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy402); if( pSelectRHS ){ parserDoubleLinkSelect(pParse, pSelectRHS); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelectRHS); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pSelectRHS); } }else{ - yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy14; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy590->x.pList = yymsp[-1].minor.yy402; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy590); } } - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } } break; case 224: /* expr ::= LP select RP */ { - yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); - sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy454, yymsp[-1].minor.yy555); + yymsp[-2].minor.yy590 = sqlite3PExpr(pParse, TK_SELECT, 
0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy590, yymsp[-1].minor.yy637); } break; case 225: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, yymsp[-1].minor.yy555); - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, yymsp[-1].minor.yy637); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 226: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); - if( yymsp[0].minor.yy14 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy14); - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelect); - if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[0].minor.yy402 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy402); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy590, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy590, pSelect); + if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy590, 0); } break; case 227: /* expr ::= EXISTS LP select RP */ { Expr *p; - p = yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); - sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy555); + p = yymsp[-3].minor.yy590 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy637); } break; case 228: /* expr ::= CASE case_operand case_exprlist case_else END */ { - yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy454, 0); - if( yymsp[-4].minor.yy454 ){ - yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy454 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454) : yymsp[-2].minor.yy14; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy590, 0); + if( yymsp[-4].minor.yy590 ){ + yymsp[-4].minor.yy590->x.pList = yymsp[-1].minor.yy590 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy402,yymsp[-1].minor.yy590) : yymsp[-2].minor.yy402; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy590); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy402); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy590); } } break; case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); - yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[0].minor.yy454); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, yymsp[-2].minor.yy590); + yymsp[-4].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy402, yymsp[0].minor.yy590); } break; case 230: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); - yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, yymsp[0].minor.yy454); + yymsp[-3].minor.yy402 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy590); + yymsp[-3].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy402, yymsp[0].minor.yy590); } break; case 235: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy454);} +{yymsp[-2].minor.yy402 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy402,yymsp[0].minor.yy590);} break; case 236: /* nexprlist ::= expr */ -{yymsp[0].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy454); /*A-overwrites-Y*/} +{yymsp[0].minor.yy402 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy590); /*A-overwrites-Y*/} break; case 238: /* paren_exprlist ::= LP exprlist RP */ case 243: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==243); -{yymsp[-2].minor.yy14 = yymsp[-1].minor.yy14;} +{yymsp[-2].minor.yy402 = yymsp[-1].minor.yy402;} break; case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy144, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy454, SQLITE_SO_ASC, yymsp[-8].minor.yy144, SQLITE_IDXTYPE_APPDEF); + sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy402, yymsp[-10].minor.yy502, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy590, SQLITE_SO_ASC, yymsp[-8].minor.yy502, SQLITE_IDXTYPE_APPDEF); if( IN_RENAME_OBJECT && pParse->pNewIndex ){ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0); } @@ -177184,29 +178961,29 @@ static YYACTIONTYPE yy_reduce( break; case 240: /* uniqueflag ::= UNIQUE */ case 282: /* raisetype ::= ABORT */ yytestcase(yyruleno==282); -{yymsp[0].minor.yy144 = OE_Abort;} +{yymsp[0].minor.yy502 = OE_Abort;} break; case 241: /* uniqueflag ::= */ -{yymsp[1].minor.yy144 = OE_None;} +{yymsp[1].minor.yy502 = OE_None;} break; case 244: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy14 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); + yymsp[-4].minor.yy402 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy402, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502); } break; case 245: /* eidlist ::= nm collate sortorder */ { - 
yymsp[-2].minor.yy14 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); /*A-overwrites-Y*/ + yymsp[-2].minor.yy402 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502); /*A-overwrites-Y*/ } break; case 248: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy203, yymsp[-1].minor.yy144);} +{sqlite3DropIndex(pParse, yymsp[0].minor.yy563, yymsp[-1].minor.yy502);} break; case 249: /* cmd ::= VACUUM vinto */ -{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy454);} +{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy590);} break; case 250: /* cmd ::= VACUUM nm vinto */ -{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy454);} +{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy590);} break; case 253: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} @@ -177228,50 +179005,50 @@ static YYACTIONTYPE yy_reduce( Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy427, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy319, &all); } break; case 261: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy144, yymsp[-4].minor.yy286.a, yymsp[-4].minor.yy286.b, yymsp[-2].minor.yy203, yymsp[0].minor.yy454, yymsp[-10].minor.yy144, yymsp[-8].minor.yy144); + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy502, yymsp[-4].minor.yy28.a, yymsp[-4].minor.yy28.b, yymsp[-2].minor.yy563, yymsp[0].minor.yy590, yymsp[-10].minor.yy502, yymsp[-8].minor.yy502); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; case 262: /* trigger_time ::= BEFORE|AFTER */ -{ yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/ } +{ yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/ } break; case 263: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy144 = TK_INSTEAD;} +{ yymsp[-1].minor.yy502 = TK_INSTEAD;} break; case 264: /* trigger_time ::= */ -{ yymsp[1].minor.yy144 = TK_BEFORE; } +{ yymsp[1].minor.yy502 = TK_BEFORE; } break; case 265: /* trigger_event ::= DELETE|INSERT */ case 266: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==266); -{yymsp[0].minor.yy286.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy286.b = 0;} +{yymsp[0].minor.yy28.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy28.b = 0;} break; case 267: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy286.a = TK_UPDATE; yymsp[-2].minor.yy286.b = yymsp[0].minor.yy132;} +{yymsp[-2].minor.yy28.a = TK_UPDATE; yymsp[-2].minor.yy28.b = yymsp[0].minor.yy204;} break; case 268: /* when_clause ::= */ case 287: /* key_opt ::= */ yytestcase(yyruleno==287); -{ yymsp[1].minor.yy454 = 0; } +{ yymsp[1].minor.yy590 = 0; } break; case 269: /* when_clause ::= WHEN expr */ case 288: /* key_opt ::= KEY expr */ yytestcase(yyruleno==288); -{ yymsp[-1].minor.yy454 = yymsp[0].minor.yy454; } +{ yymsp[-1].minor.yy590 = yymsp[0].minor.yy590; } break; case 270: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { - assert( yymsp[-2].minor.yy427!=0 ); - yymsp[-2].minor.yy427->pLast->pNext = yymsp[-1].minor.yy427; - yymsp[-2].minor.yy427->pLast = yymsp[-1].minor.yy427; 
+ assert( yymsp[-2].minor.yy319!=0 ); + yymsp[-2].minor.yy319->pLast->pNext = yymsp[-1].minor.yy319; + yymsp[-2].minor.yy319->pLast = yymsp[-1].minor.yy319; } break; case 271: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy427!=0 ); - yymsp[-1].minor.yy427->pLast = yymsp[-1].minor.yy427; + assert( yymsp[-1].minor.yy319!=0 ); + yymsp[-1].minor.yy319->pLast = yymsp[-1].minor.yy319; } break; case 272: /* trnm ::= nm DOT nm */ @@ -177297,58 +179074,58 @@ static YYACTIONTYPE yy_reduce( } break; case 275: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ -{yylhsminor.yy427 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy203, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454, yymsp[-7].minor.yy144, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy168);} - yymsp[-8].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy563, yymsp[-3].minor.yy402, yymsp[-1].minor.yy590, yymsp[-7].minor.yy502, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy342);} + yymsp[-8].minor.yy319 = yylhsminor.yy319; break; case 276: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { - yylhsminor.yy427 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy132,yymsp[-2].minor.yy555,yymsp[-6].minor.yy144,yymsp[-1].minor.yy122,yymsp[-7].minor.yy168,yymsp[0].minor.yy168);/*yylhsminor.yy427-overwrites-yymsp[-6].minor.yy144*/ + yylhsminor.yy319 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy204,yymsp[-2].minor.yy637,yymsp[-6].minor.yy502,yymsp[-1].minor.yy403,yymsp[-7].minor.yy342,yymsp[0].minor.yy342);/*yylhsminor.yy319-overwrites-yymsp[-6].minor.yy502*/ } - yymsp[-7].minor.yy427 = yylhsminor.yy427; + yymsp[-7].minor.yy319 = yylhsminor.yy319; break; case 277: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -{yylhsminor.yy427 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy454, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy168);} - yymsp[-5].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy590, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy342);} + yymsp[-5].minor.yy319 = yylhsminor.yy319; break; case 278: /* trigger_cmd ::= scanpt select scanpt */ -{yylhsminor.yy427 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy555, yymsp[-2].minor.yy168, yymsp[0].minor.yy168); /*yylhsminor.yy427-overwrites-yymsp[-1].minor.yy555*/} - yymsp[-2].minor.yy427 = yylhsminor.yy427; +{yylhsminor.yy319 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy637, yymsp[-2].minor.yy342, yymsp[0].minor.yy342); /*yylhsminor.yy319-overwrites-yymsp[-1].minor.yy637*/} + yymsp[-2].minor.yy319 = yylhsminor.yy319; break; case 279: /* expr ::= RAISE LP IGNORE RP */ { - yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); - if( yymsp[-3].minor.yy454 ){ - yymsp[-3].minor.yy454->affExpr = OE_Ignore; + yymsp[-3].minor.yy590 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy590 ){ + yymsp[-3].minor.yy590->affExpr = OE_Ignore; } } break; - case 280: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 280: /* expr ::= RAISE LP raisetype COMMA expr RP */ { - yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); - if( yymsp[-5].minor.yy454 ) { - yymsp[-5].minor.yy454->affExpr = (char)yymsp[-3].minor.yy144; + yymsp[-5].minor.yy590 = sqlite3PExpr(pParse, TK_RAISE, yymsp[-1].minor.yy590, 0); + 
if( yymsp[-5].minor.yy590 ) { + yymsp[-5].minor.yy590->affExpr = (char)yymsp[-3].minor.yy502; } } break; case 281: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy144 = OE_Rollback;} +{yymsp[0].minor.yy502 = OE_Rollback;} break; case 283: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy144 = OE_Fail;} +{yymsp[0].minor.yy502 = OE_Fail;} break; case 284: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy203,yymsp[-1].minor.yy144); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy563,yymsp[-1].minor.yy502); } break; case 285: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy454, yymsp[-1].minor.yy454, yymsp[0].minor.yy454); + sqlite3Attach(pParse, yymsp[-3].minor.yy590, yymsp[-1].minor.yy590, yymsp[0].minor.yy590); } break; case 286: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy454); + sqlite3Detach(pParse, yymsp[0].minor.yy590); } break; case 289: /* cmd ::= REINDEX */ @@ -177365,7 +179142,7 @@ static YYACTIONTYPE yy_reduce( break; case 293: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy203,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy563,&yymsp[0].minor.yy0); } break; case 294: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ @@ -177376,18 +179153,18 @@ static YYACTIONTYPE yy_reduce( break; case 295: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { - sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy203, &yymsp[0].minor.yy0); + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy563, &yymsp[0].minor.yy0); } break; case 296: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy203); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy563); } break; case 297: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy203, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy563, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; case 298: /* cmd ::= create_vtab */ @@ -177398,7 +179175,7 @@ static YYACTIONTYPE yy_reduce( break; case 300: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy144); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy502); } break; case 301: /* vtabarg ::= */ @@ -177411,20 +179188,20 @@ static YYACTIONTYPE yy_reduce( break; case 305: /* with ::= WITH wqlist */ case 306: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==306); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy59, 1); } +{ sqlite3WithPush(pParse, yymsp[0].minor.yy125, 1); } break; case 307: /* wqas ::= AS */ -{yymsp[0].minor.yy462 = M10d_Any;} +{yymsp[0].minor.yy444 = M10d_Any;} break; case 308: /* wqas ::= AS MATERIALIZED */ -{yymsp[-1].minor.yy462 = M10d_Yes;} +{yymsp[-1].minor.yy444 = M10d_Yes;} break; case 309: /* wqas ::= AS NOT MATERIALIZED */ -{yymsp[-2].minor.yy462 = M10d_No;} +{yymsp[-2].minor.yy444 = M10d_No;} break; case 310: /* wqitem ::= withnm eidlist_opt wqas LP select RP */ { - yymsp[-5].minor.yy67 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy555, yymsp[-3].minor.yy462); /*A-overwrites-X*/ + yymsp[-5].minor.yy361 = 
sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy402, yymsp[-1].minor.yy637, yymsp[-3].minor.yy444); /*A-overwrites-X*/ } break; case 311: /* withnm ::= nm */ @@ -177432,160 +179209,160 @@ static YYACTIONTYPE yy_reduce( break; case 312: /* wqlist ::= wqitem */ { - yymsp[0].minor.yy59 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy67); /*A-overwrites-X*/ + yymsp[0].minor.yy125 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy361); /*A-overwrites-X*/ } break; case 313: /* wqlist ::= wqlist COMMA wqitem */ { - yymsp[-2].minor.yy59 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy59, yymsp[0].minor.yy67); + yymsp[-2].minor.yy125 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy125, yymsp[0].minor.yy361); } break; case 314: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - assert( yymsp[0].minor.yy211!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy211); - yymsp[0].minor.yy211->pNextWin = yymsp[-2].minor.yy211; - yylhsminor.yy211 = yymsp[0].minor.yy211; + assert( yymsp[0].minor.yy483!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy483); + yymsp[0].minor.yy483->pNextWin = yymsp[-2].minor.yy483; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[-2].minor.yy211 = yylhsminor.yy211; + yymsp[-2].minor.yy483 = yylhsminor.yy483; break; case 315: /* windowdefn ::= nm AS LP window RP */ { - if( ALWAYS(yymsp[-1].minor.yy211) ){ - yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + if( ALWAYS(yymsp[-1].minor.yy483) ){ + yymsp[-1].minor.yy483->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy211 = yymsp[-1].minor.yy211; + yylhsminor.yy483 = yymsp[-1].minor.yy483; } - yymsp[-4].minor.yy211 = yylhsminor.yy211; + yymsp[-4].minor.yy483 = yylhsminor.yy483; break; case 316: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, 0); + yymsp[-4].minor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy402, yymsp[-1].minor.yy402, 0); } break; case 317: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, &yymsp[-5].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, yymsp[-2].minor.yy402, yymsp[-1].minor.yy402, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy211 = yylhsminor.yy211; + yymsp[-5].minor.yy483 = yylhsminor.yy483; break; case 318: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, 0); + yymsp[-3].minor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, yymsp[-1].minor.yy402, 0); } break; case 319: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, yymsp[-1].minor.yy402, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy211 = yylhsminor.yy211; + yymsp[-4].minor.yy483 = yylhsminor.yy483; break; case 320: /* window ::= nm frame_opt */ { - yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy483 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy483, 0, 0, 
&yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy211 = yylhsminor.yy211; + yymsp[-1].minor.yy483 = yylhsminor.yy483; break; case 321: /* frame_opt ::= */ { - yymsp[1].minor.yy211 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy483 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; case 322: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy144, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy462); + yylhsminor.yy483 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy502, yymsp[-1].minor.yy205.eType, yymsp[-1].minor.yy205.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy444); } - yymsp[-2].minor.yy211 = yylhsminor.yy211; + yymsp[-2].minor.yy483 = yylhsminor.yy483; break; case 323: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy144, yymsp[-3].minor.yy509.eType, yymsp[-3].minor.yy509.pExpr, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, yymsp[0].minor.yy462); + yylhsminor.yy483 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy502, yymsp[-3].minor.yy205.eType, yymsp[-3].minor.yy205.pExpr, yymsp[-1].minor.yy205.eType, yymsp[-1].minor.yy205.pExpr, yymsp[0].minor.yy444); } - yymsp[-5].minor.yy211 = yylhsminor.yy211; + yymsp[-5].minor.yy483 = yylhsminor.yy483; break; case 325: /* frame_bound_s ::= frame_bound */ case 327: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==327); -{yylhsminor.yy509 = yymsp[0].minor.yy509;} - yymsp[0].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205 = yymsp[0].minor.yy205;} + yymsp[0].minor.yy205 = yylhsminor.yy205; break; case 326: /* frame_bound_s ::= UNBOUNDED PRECEDING */ case 328: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==328); case 330: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==330); -{yylhsminor.yy509.eType = yymsp[-1].major; yylhsminor.yy509.pExpr = 0;} - yymsp[-1].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205.eType = yymsp[-1].major; yylhsminor.yy205.pExpr = 0;} + yymsp[-1].minor.yy205 = yylhsminor.yy205; break; case 329: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy509.eType = yymsp[0].major; yylhsminor.yy509.pExpr = yymsp[-1].minor.yy454;} - yymsp[-1].minor.yy509 = yylhsminor.yy509; +{yylhsminor.yy205.eType = yymsp[0].major; yylhsminor.yy205.pExpr = yymsp[-1].minor.yy590;} + yymsp[-1].minor.yy205 = yylhsminor.yy205; break; case 331: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy462 = 0;} +{yymsp[1].minor.yy444 = 0;} break; case 332: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy462 = yymsp[0].minor.yy462;} +{yymsp[-1].minor.yy444 = yymsp[0].minor.yy444;} break; case 333: /* frame_exclude ::= NO OTHERS */ case 334: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==334); -{yymsp[-1].minor.yy462 = yymsp[-1].major; /*A-overwrites-X*/} +{yymsp[-1].minor.yy444 = yymsp[-1].major; /*A-overwrites-X*/} break; case 335: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy462 = yymsp[0].major; /*A-overwrites-X*/} +{yymsp[0].minor.yy444 = yymsp[0].major; /*A-overwrites-X*/} break; case 336: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy211 = yymsp[0].minor.yy211; } +{ yymsp[-1].minor.yy483 = yymsp[0].minor.yy483; } break; case 337: /* filter_over ::= filter_clause over_clause */ { - if( yymsp[0].minor.yy211 ){ - yymsp[0].minor.yy211->pFilter = 
yymsp[-1].minor.yy454; + if( yymsp[0].minor.yy483 ){ + yymsp[0].minor.yy483->pFilter = yymsp[-1].minor.yy590; }else{ - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy590); } - yylhsminor.yy211 = yymsp[0].minor.yy211; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[-1].minor.yy211 = yylhsminor.yy211; + yymsp[-1].minor.yy483 = yylhsminor.yy483; break; case 338: /* filter_over ::= over_clause */ { - yylhsminor.yy211 = yymsp[0].minor.yy211; + yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[0].minor.yy211 = yylhsminor.yy211; + yymsp[0].minor.yy483 = yylhsminor.yy483; break; case 339: /* filter_over ::= filter_clause */ { - yylhsminor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy211 ){ - yylhsminor.yy211->eFrmType = TK_FILTER; - yylhsminor.yy211->pFilter = yymsp[0].minor.yy454; + yylhsminor.yy483 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy483 ){ + yylhsminor.yy483->eFrmType = TK_FILTER; + yylhsminor.yy483->pFilter = yymsp[0].minor.yy590; }else{ - sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy454); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy590); } } - yymsp[0].minor.yy211 = yylhsminor.yy211; + yymsp[0].minor.yy483 = yylhsminor.yy483; break; case 340: /* over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy211 = yymsp[-1].minor.yy211; - assert( yymsp[-3].minor.yy211!=0 ); + yymsp[-3].minor.yy483 = yymsp[-1].minor.yy483; + assert( yymsp[-3].minor.yy483!=0 ); } break; case 341: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy211 ){ - yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy483 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy483 ){ + yymsp[-1].minor.yy483->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; case 342: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy454 = yymsp[-1].minor.yy454; } +{ yymsp[-4].minor.yy590 = yymsp[-1].minor.yy590; } break; case 343: /* term ::= QNUMBER */ { - yylhsminor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); - sqlite3DequoteNumber(pParse, yylhsminor.yy454); + yylhsminor.yy590=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); + sqlite3DequoteNumber(pParse, yylhsminor.yy590); } - yymsp[0].minor.yy454 = yylhsminor.yy454; + yymsp[0].minor.yy590 = yylhsminor.yy590; break; default: /* (344) input ::= cmdlist */ yytestcase(yyruleno==344); @@ -177715,7 +179492,7 @@ static void yy_syntax_error( UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */ if( TOKEN.z[0] ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); + parserSyntaxError(pParse, &TOKEN); }else{ sqlite3ErrorMsg(pParse, "incomplete input"); } @@ -178766,7 +180543,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ case CC_MINUS: { if( z[1]=='-' ){ for(i=2; (c=z[i])!=0 && c!='\n'; i++){} - *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + *tokenType = TK_COMMENT; return i; }else if( z[1]=='>' ){ *tokenType = TK_PTR; @@ -178802,7 +180579,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ } for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){} if( c ) i++; - *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + *tokenType = TK_COMMENT; return i; } case CC_PERCENT: { @@ -179131,12 +180908,12 
@@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( tokenType>=TK_WINDOW ){ assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW - || tokenType==TK_QNUMBER + || tokenType==TK_QNUMBER || tokenType==TK_COMMENT ); #else if( tokenType>=TK_SPACE ){ assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL - || tokenType==TK_QNUMBER + || tokenType==TK_QNUMBER || tokenType==TK_COMMENT ); #endif /* SQLITE_OMIT_WINDOWFUNC */ if( AtomicLoad(&db->u1.isInterrupted) ){ @@ -179170,6 +180947,9 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ assert( n==6 ); tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ + }else if( tokenType==TK_COMMENT && (db->flags & SQLITE_Comments)!=0 ){ + zSql += n; + continue; }else if( tokenType!=TK_QNUMBER ){ Token x; x.z = zSql; @@ -179206,7 +180986,9 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( pParse->zErrMsg==0 ){ pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc)); } - sqlite3_log(pParse->rc, "%s in \"%s\"", pParse->zErrMsg, pParse->zTail); + if( (pParse->prepFlags & SQLITE_PREPARE_DONT_LOG)==0 ){ + sqlite3_log(pParse->rc, "%s in \"%s\"", pParse->zErrMsg, pParse->zTail); + } nErr++; } pParse->zTail = zSql; @@ -179274,6 +181056,7 @@ SQLITE_PRIVATE char *sqlite3Normalize( n = sqlite3GetToken((unsigned char*)zSql+i, &tokenType); if( NEVER(n<=0) ) break; switch( tokenType ){ + case TK_COMMENT: case TK_SPACE: { break; } @@ -179915,32 +181698,6 @@ SQLITE_API char *sqlite3_temp_directory = 0; */ SQLITE_API char *sqlite3_data_directory = 0; -/* -** Determine whether or not high-precision (long double) floating point -** math works correctly on CPU currently running. -*/ -static SQLITE_NOINLINE int hasHighPrecisionDouble(int rc){ - if( sizeof(LONGDOUBLE_TYPE)<=8 ){ - /* If the size of "long double" is not more than 8, then - ** high-precision math is not possible. */ - return 0; - }else{ - /* Just because sizeof(long double)>8 does not mean that the underlying - ** hardware actually supports high-precision floating point. For example, - ** clearing the 0x100 bit in the floating-point control word on Intel - ** processors will make long double work like double, even though long - ** double takes up more space. The only way to determine if long double - ** actually works is to run an experiment. */ - LONGDOUBLE_TYPE a, b, c; - rc++; - a = 1.0+rc*0.1; - b = 1.0e+18+rc*25.0; - c = a+b; - return b!=c; - } -} - - /* ** Initialize SQLite. ** @@ -180135,13 +181892,6 @@ SQLITE_API int sqlite3_initialize(void){ rc = SQLITE_EXTRA_INIT(0); } #endif - - /* Experimentally determine if high-precision floating point is - ** available. 
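The SQLITE_PREPARE_DONT_LOG guard added above suppresses the sqlite3_log() call that sqlite3RunParser() would otherwise make when a statement fails to compile. A minimal caller-side sketch, assuming only the sqlite3.h that ships with this same amalgamation (which defines the flag):

  /* Compile a statement without writing parse errors to the error log.
  ** The fourth argument of sqlite3_prepare_v3() is a bitmask of
  ** SQLITE_PREPARE_* flags. */
  #include "sqlite3.h"

  static int prepare_quietly(sqlite3 *db, const char *zSql, sqlite3_stmt **ppStmt){
    return sqlite3_prepare_v3(db, zSql, -1, SQLITE_PREPARE_DONT_LOG, ppStmt, 0);
  }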
*/ -#ifndef SQLITE_OMIT_WSD - sqlite3Config.bUseLongDouble = hasHighPrecisionDouble(rc); -#endif - return rc; } @@ -180556,7 +182306,7 @@ SQLITE_API int sqlite3_config(int op, ...){ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ #ifndef SQLITE_OMIT_LOOKASIDE void *pStart; - sqlite3_int64 szAlloc = sz*(sqlite3_int64)cnt; + sqlite3_int64 szAlloc; int nBig; /* Number of full-size slots */ int nSm; /* Number smaller LOOKASIDE_SMALL-byte slots */ @@ -180575,7 +182325,9 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ */ sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */ if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; + if( sz>65528 ) sz = 65528; if( cnt<0 ) cnt = 0; + szAlloc = (i64)sz*(i64)cnt; if( sz==0 || cnt==0 ){ sz = 0; pStart = 0; @@ -180590,10 +182342,10 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE if( sz>=LOOKASIDE_SMALL*3 ){ nBig = szAlloc/(3*LOOKASIDE_SMALL+sz); - nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL; + nSm = (szAlloc - (i64)sz*(i64)nBig)/LOOKASIDE_SMALL; }else if( sz>=LOOKASIDE_SMALL*2 ){ nBig = szAlloc/(LOOKASIDE_SMALL+sz); - nSm = (szAlloc - sz*nBig)/LOOKASIDE_SMALL; + nSm = (szAlloc - (i64)sz*(i64)nBig)/LOOKASIDE_SMALL; }else #endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */ if( sz>0 ){ @@ -180748,7 +182500,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ default: { static const struct { int op; /* The opcode */ - u32 mask; /* Mask of the bit in sqlite3.flags to set/clear */ + u64 mask; /* Mask of the bit in sqlite3.flags to set/clear */ } aFlagOp[] = { { SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_ForeignKeys }, { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger }, @@ -180769,6 +182521,9 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ { SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_TrustedSchema }, { SQLITE_DBCONFIG_STMT_SCANSTATUS, SQLITE_StmtScanStatus }, { SQLITE_DBCONFIG_REVERSE_SCANORDER, SQLITE_ReverseOrder }, + { SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE, SQLITE_AttachCreate }, + { SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE, SQLITE_AttachWrite }, + { SQLITE_DBCONFIG_ENABLE_COMMENTS, SQLITE_Comments }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ @@ -181212,10 +182967,6 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ sqlite3Error(db, SQLITE_OK); /* Deallocates any cached error strings. */ sqlite3ValueFree(db->pErr); sqlite3CloseExtensions(db); -#if SQLITE_USER_AUTHENTICATION - sqlite3_free(db->auth.zAuthUser); - sqlite3_free(db->auth.zAuthPW); -#endif db->eOpenState = SQLITE_STATE_ERROR; @@ -181682,7 +183433,8 @@ SQLITE_PRIVATE int sqlite3CreateFunc( assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY ); extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY| - SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE); + SQLITE_SUBTYPE|SQLITE_INNOCUOUS| + SQLITE_RESULT_SUBTYPE|SQLITE_SELFORDER1); enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. 
But @@ -182649,8 +184401,8 @@ static const int aHardLimit[] = { #if SQLITE_MAX_VDBE_OP<40 # error SQLITE_MAX_VDBE_OP must be at least 40 #endif -#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>127 -# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 127 +#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>32767 +# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 32767 #endif #if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>125 # error SQLITE_MAX_ATTACHED must be between 0 and 125 @@ -182717,8 +184469,8 @@ SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ if( newLimit>=0 ){ /* IMP: R-52476-28732 */ if( newLimit>aHardLimit[limitId] ){ newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */ - }else if( newLimit<1 && limitId==SQLITE_LIMIT_LENGTH ){ - newLimit = 1; + }else if( newLimitaLimit[limitId] = newLimit; } @@ -183113,6 +184865,9 @@ static int openDatabase( | SQLITE_EnableTrigger | SQLITE_EnableView | SQLITE_CacheSpill + | SQLITE_AttachCreate + | SQLITE_AttachWrite + | SQLITE_Comments #if !defined(SQLITE_TRUSTED_SCHEMA) || SQLITE_TRUSTED_SCHEMA+0!=0 | SQLITE_TrustedSchema #endif @@ -183237,6 +184992,7 @@ static int openDatabase( if( ((1<<(flags&7)) & 0x46)==0 ){ rc = SQLITE_MISUSE_BKPT; /* IMP: R-18321-05872 */ }else{ + if( zFilename==0 ) zFilename = ":memory:"; rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg); } if( rc!=SQLITE_OK ){ @@ -184061,7 +185817,6 @@ SQLITE_API int sqlite3_test_control(int op, ...){ /* Invoke these debugging routines so that the compiler does not ** issue "defined but not used" warnings. */ if( x==9999 ){ - sqlite3ShowExpr(0); sqlite3ShowExpr(0); sqlite3ShowExprList(0); sqlite3ShowIdList(0); @@ -184149,6 +185904,18 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } + /* sqlite3_test_control(SQLITE_TESTCTRL_GETOPT, sqlite3 *db, int *N) + ** + ** Write the current optimization settings into *N. A zero bit means that + ** the optimization is on, and a 1 bit means that the optimization is off. + */ + case SQLITE_TESTCTRL_GETOPT: { + sqlite3 *db = va_arg(ap, sqlite3*); + int *pN = va_arg(ap, int*); + *pN = db->dbOptFlags; + break; + } + /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, onoff, xAlt); ** ** If parameter onoff is 1, subsequent calls to localtime() fail. @@ -184380,24 +186147,6 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } -#if !defined(SQLITE_OMIT_WSD) - /* sqlite3_test_control(SQLITE_TESTCTRL_USELONGDOUBLE, int X); - ** - ** X<0 Make no changes to the bUseLongDouble. Just report value. 
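Two of the changes in the hunks above are visible through the public API: openDatabase() now maps a NULL filename onto ":memory:", and the sqlite3_db_config() dispatch table gains SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE, SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE and SQLITE_DBCONFIG_ENABLE_COMMENTS verbs for the new flag bits. A hedged sketch of both, assuming the sqlite3.h from this amalgamation:

  #include "sqlite3.h"

  /* Open an in-memory database (a NULL filename is now treated like
  ** ":memory:" per the openDatabase() hunk above), then disable SQL
  ** comments on the connection via the new db-config verb. */
  static sqlite3 *open_locked_down(void){
    sqlite3 *db = 0;
    int prev = 0;
    if( sqlite3_open(0, &db)!=SQLITE_OK ){
      sqlite3_close(db);
      return 0;
    }
    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_COMMENTS, 0, &prev);
    return db;
  }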
- ** X==0 Disable bUseLongDouble - ** X==1 Enable bUseLongDouble - ** X>=2 Set bUseLongDouble to its default value for this platform - */ - case SQLITE_TESTCTRL_USELONGDOUBLE: { - int b = va_arg(ap, int); - if( b>=2 ) b = hasHighPrecisionDouble(b); - if( b>=0 ) sqlite3Config.bUseLongDouble = b>0; - rc = sqlite3Config.bUseLongDouble!=0; - break; - } -#endif - - #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) /* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue) ** @@ -184705,7 +186454,11 @@ SQLITE_API int sqlite3_snapshot_get( if( iDb==0 || iDb>1 ){ Btree *pBt = db->aDb[iDb].pBt; if( SQLITE_TXN_WRITE!=sqlite3BtreeTxnState(pBt) ){ + Pager *pPager = sqlite3BtreePager(pBt); + i64 dummy = 0; + sqlite3PagerSnapshotOpen(pPager, (sqlite3_snapshot*)&dummy); rc = sqlite3BtreeBeginTrans(pBt, 0, 0); + sqlite3PagerSnapshotOpen(pPager, 0); if( rc==SQLITE_OK ){ rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot); } @@ -186420,6 +188173,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iCol, char **); SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *); SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); +SQLITE_PRIVATE int sqlite3Fts3MsrCancel(Fts3Cursor*, Fts3Expr*); /* fts3_tokenize_vtab.c */ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *, void(*xDestroy)(void*)); @@ -188495,10 +190249,15 @@ static int fts3PoslistPhraseMerge( if( *p1==POS_COLUMN ){ p1++; p1 += fts3GetVarint32(p1, &iCol1); + /* iCol1==0 indicates corruption. Column 0 does not have a POS_COLUMN + ** entry, so this is actually end-of-doclist. */ + if( iCol1==0 ) return 0; } if( *p2==POS_COLUMN ){ p2++; p2 += fts3GetVarint32(p2, &iCol2); + /* As above, iCol2==0 indicates corruption. */ + if( iCol2==0 ) return 0; } while( 1 ){ @@ -191669,7 +193428,7 @@ static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ nTmp += p->pRight->pPhrase->doclist.nList; } nTmp += p->pPhrase->doclist.nList; - aTmp = sqlite3_malloc64(nTmp*2); + aTmp = sqlite3_malloc64(nTmp*2 + FTS3_VARINT_MAX); if( !aTmp ){ *pRc = SQLITE_NOMEM; res = 0; @@ -191933,6 +193692,24 @@ static void fts3EvalRestart( } } +/* +** Expression node pExpr is an MSR phrase. This function restarts pExpr +** so that it is a regular phrase query, not an MSR. SQLITE_OK is returned +** if successful, or an SQLite error code otherwise. +*/ +SQLITE_PRIVATE int sqlite3Fts3MsrCancel(Fts3Cursor *pCsr, Fts3Expr *pExpr){ + int rc = SQLITE_OK; + if( pExpr->bEof==0 ){ + i64 iDocid = pExpr->iDocid; + fts3EvalRestart(pCsr, pExpr, &rc); + while( rc==SQLITE_OK && pExpr->iDocid!=iDocid ){ + fts3EvalNextRow(pCsr, pExpr, &rc); + if( pExpr->bEof ) rc = FTS_CORRUPT_VTAB; + } + } + return rc; +} + /* ** After allocating the Fts3Expr.aMI[] array for each phrase in the ** expression rooted at pExpr, the cursor iterates through all rows matched @@ -192320,7 +194097,7 @@ SQLITE_PRIVATE int sqlite3Fts3Corrupt(){ } #endif -#if !SQLITE_CORE +#if !defined(SQLITE_CORE) /* ** Initialize API pointer table, if required. 
*/ @@ -193222,10 +194999,11 @@ static int getNextString( Fts3PhraseToken *pToken; p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken)); - if( !p ) goto no_mem; - zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte); - if( !zTemp ) goto no_mem; + if( !zTemp || !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } assert( nToken==ii ); pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii]; @@ -193240,9 +195018,6 @@ static int getNextString( nToken = ii+1; } } - - pModule->xClose(pCursor); - pCursor = 0; } if( rc==SQLITE_DONE ){ @@ -193250,7 +195025,10 @@ static int getNextString( char *zBuf = 0; p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp); - if( !p ) goto no_mem; + if( !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p); p->eType = FTSQUERY_PHRASE; p->pPhrase = (Fts3Phrase *)&p[1]; @@ -193258,11 +195036,9 @@ static int getNextString( p->pPhrase->nToken = nToken; zBuf = (char *)&p->pPhrase->aToken[nToken]; + assert( nTemp==0 || zTemp ); if( zTemp ){ memcpy(zBuf, zTemp, nTemp); - sqlite3_free(zTemp); - }else{ - assert( nTemp==0 ); } for(jj=0; jj<p->pPhrase->nToken; jj++){ @@ -193272,17 +195048,17 @@ static int getNextString( rc = SQLITE_OK; } - *ppExpr = p; - return rc; -no_mem: - + getnextstring_out: if( pCursor ){ pModule->xClose(pCursor); } sqlite3_free(zTemp); - sqlite3_free(p); - *ppExpr = 0; - return SQLITE_NOMEM; + if( rc!=SQLITE_OK ){ + sqlite3_free(p); + p = 0; + } + *ppExpr = p; + return rc; } /* @@ -195476,11 +197252,7 @@ SQLITE_PRIVATE int sqlite3Fts3InitTokenizer( #ifdef SQLITE_TEST -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -#endif +#include "tclsqlite.h" /* #include <tcl.h> */ /* @@ -202707,6 +204479,7 @@ static int fts3SnippetNextCandidate(SnippetIter *pIter){ return 1; } + assert( pIter->nSnippet>=0 ); pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1; for(i=0; i<pIter->nPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; @@ -203894,6 +205667,22 @@ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ return rc; } +/* +** If expression pExpr is a phrase expression that uses an MSR query, +** restart it as a regular, non-incremental query. Return SQLITE_OK +** if successful, or an SQLite error code otherwise. +*/ +static int fts3ExprRestartIfCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ + TermOffsetCtx *p = (TermOffsetCtx*)ctx; + int rc = SQLITE_OK; + UNUSED_PARAMETER(iPhrase); + if( pExpr->pPhrase && pExpr->pPhrase->bIncr ){ + rc = sqlite3Fts3MsrCancel(p->pCsr, pExpr); + pExpr->pPhrase->bIncr = 0; + } + return rc; +} + /* ** Implementation of offsets() function. */ @@ -203930,6 +205719,12 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets( sCtx.iDocid = pCsr->iPrevId; sCtx.pCsr = pCsr; + /* If a query restart will be required, do it here, rather than later, + ** after pointers to poslist buffers that may be invalidated by a restart + ** have been saved. */ + rc = sqlite3Fts3ExprIterate(pCsr->pExpr, fts3ExprRestartIfCb, (void*)&sCtx); + if( rc!=SQLITE_OK ) goto offsets_out; + /* Loop through the table columns, appending offset information to ** string-buffer res for each column.
*/ @@ -207700,7 +209495,9 @@ static u32 jsonLookupStep( zPath++; if( zPath[0]=='"' ){ zKey = zPath + 1; - for(i=1; zPath[i] && zPath[i]!='"'; i++){} + for(i=1; zPath[i] && zPath[i]!='"'; i++){ + if( zPath[i]=='\\' && zPath[i+1]!=0 ) i++; + } nKey = i-1; if( zPath[i] ){ i++; @@ -208710,10 +210507,16 @@ static void jsonExtractFunc( ** NUMBER ==> $[NUMBER] // PG compatible ** LABEL ==> $.LABEL // PG compatible ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience + ** + ** Updated 2024-05-27: If the NUMBER is negative, then PG counts from + ** the right of the array. Hence for negative NUMBER: + ** + ** NUMBER ==> $[#NUMBER] // PG compatible */ jsonStringInit(&jx, ctx); if( sqlite3_value_type(argv[i])==SQLITE_INTEGER ){ jsonAppendRawNZ(&jx, "[", 1); + if( zPath[0]=='-' ) jsonAppendRawNZ(&jx,"#",1); jsonAppendRaw(&jx, zPath, nPath); jsonAppendRawNZ(&jx, "]", 2); }else if( jsonAllAlphanum(zPath, nPath) ){ @@ -214089,8 +215892,8 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ sqlite3_str_append(pOut, "}", 1); } errCode = sqlite3_str_errcode(pOut); - sqlite3_result_text(ctx, sqlite3_str_finish(pOut), -1, sqlite3_free); sqlite3_result_error_code(ctx, errCode); + sqlite3_result_text(ctx, sqlite3_str_finish(pOut), -1, sqlite3_free); } /* This routine implements an SQL function that returns the "depth" parameter @@ -216606,7 +218409,7 @@ SQLITE_API int sqlite3_rtree_query_callback( ); } -#if !SQLITE_CORE +#ifndef SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif @@ -217197,7 +219000,7 @@ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){ return rc; } -#if !SQLITE_CORE +#ifndef SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif @@ -218455,6 +220258,27 @@ struct RbuFrame { u32 iWalFrame; }; +#ifndef UNUSED_PARAMETER +/* +** The following macros are used to suppress compiler warnings and to +** make it clear to human readers when a function parameter is deliberately +** left unused within the body of a function. This usually happens when +** a function is called via a function pointer. For example the +** implementation of an SQL aggregate step callback may not use the +** parameter indicating the number of arguments passed to the aggregate, +** if it knows that this is enforced elsewhere. +** +** When a function parameter is not used at all within the body of a function, +** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. +** However, these macros may also be used to suppress warnings related to +** parameters that may or may not be used depending on compilation options. +** For example those parameters only used in assert() statements. In these +** cases the parameters are named as per the usual conventions. +*/ +#define UNUSED_PARAMETER(x) (void)(x) +#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) +#endif + /* ** RBU handle. 
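For the jsonExtractFunc() hunk earlier in this section: when the right-hand side of -> or ->> is a negative integer, it is now rewritten into the path "$[#N]" so that, as in PostgreSQL, elements are counted from the end of the array. An illustrative sketch (JSON functions are built into this amalgamation; error handling elided):

  #include <stdio.h>
  #include "sqlite3.h"

  /* '[10,20,30]' ->> -1 now yields 30, because the -1 is translated
  ** into the JSON path $[#-1] (last array element). */
  static void json_neg_index_demo(sqlite3 *db){
    sqlite3_stmt *pStmt = 0;
    if( sqlite3_prepare_v2(db, "SELECT '[10,20,30]' ->> -1", -1, &pStmt, 0)==SQLITE_OK ){
      if( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
      }
      sqlite3_finalize(pStmt);
    }
  }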
** @@ -218506,7 +220330,7 @@ struct sqlite3rbu { int rc; /* Value returned by last rbu_step() call */ char *zErrmsg; /* Error message if rc!=SQLITE_OK */ int nStep; /* Rows processed for current object */ - int nProgress; /* Rows processed for all objects */ + sqlite3_int64 nProgress; /* Rows processed for all objects */ RbuObjIter objiter; /* Iterator for skipping through tbl/idx */ const char *zVfsName; /* Name of automatically created rbu vfs */ rbu_file *pTargetFd; /* File handle open on target db */ @@ -218623,7 +220447,7 @@ static unsigned int rbuDeltaGetInt(const char **pz, int *pLen){ v = (v<<6) + c; } z--; - *pLen -= z - zStart; + *pLen -= (int)(z - zStart); *pz = (char*)z; return v; } @@ -218808,6 +220632,7 @@ static void rbuFossilDeltaFunc( char *aOut; assert( argc==2 ); + UNUSED_PARAMETER(argc); nOrig = sqlite3_value_bytes(argv[0]); aOrig = (const char*)sqlite3_value_blob(argv[0]); @@ -220387,13 +222212,13 @@ static char *rbuObjIterGetIndexWhere(sqlite3rbu *p, RbuObjIter *pIter){ else if( c==')' ){ nParen--; if( nParen==0 ){ - int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan; + int nSpan = (int)(&zSql[i] - pIter->aIdxCol[iIdxCol].zSpan); pIter->aIdxCol[iIdxCol++].nSpan = nSpan; i++; break; } }else if( c==',' && nParen==1 ){ - int nSpan = &zSql[i] - pIter->aIdxCol[iIdxCol].zSpan; + int nSpan = (int)(&zSql[i] - pIter->aIdxCol[iIdxCol].zSpan); pIter->aIdxCol[iIdxCol++].nSpan = nSpan; pIter->aIdxCol[iIdxCol].zSpan = &zSql[i+1]; }else if( c=='"' || c=='\'' || c=='`' ){ @@ -221083,6 +222908,8 @@ static void rbuFileSuffix3(const char *zBase, char *z){ for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} if( z[i]=='.' && sz>i+4 ) memmove(&z[i+1], &z[sz-3], 4); } +#else + UNUSED_PARAMETER2(zBase,z); #endif } @@ -221667,7 +223494,7 @@ static void rbuSaveState(sqlite3rbu *p, int eStage){ "(%d, %Q), " "(%d, %Q), " "(%d, %d), " - "(%d, %d), " + "(%d, %lld), " "(%d, %lld), " "(%d, %lld), " "(%d, %lld), " @@ -222025,6 +223852,7 @@ static void rbuIndexCntFunc( sqlite3 *db = (rbuIsVacuum(p) ? p->dbRbu : p->dbMain); assert( nVal==1 ); + UNUSED_PARAMETER(nVal); rc = prepareFreeAndCollectError(db, &pStmt, &zErrmsg, sqlite3_mprintf("SELECT count(*) FROM sqlite_schema " @@ -222300,7 +224128,7 @@ SQLITE_API sqlite3rbu *sqlite3rbu_vacuum( ){ if( zTarget==0 ){ return rbuMisuseError(); } if( zState ){ - int n = strlen(zState); + size_t n = strlen(zState); if( n>=7 && 0==memcmp("-vactmp", &zState[n-7], 7) ){ return rbuMisuseError(); } @@ -222517,6 +224345,7 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){ */ static int xDefaultRename(void *pArg, const char *zOld, const char *zNew){ int rc = SQLITE_OK; + UNUSED_PARAMETER(pArg); #if defined(_WIN32_WCE) { LPWSTR zWideOld; @@ -223421,6 +225250,9 @@ static int rbuVfsCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ ** No-op. */ static int rbuVfsGetLastError(sqlite3_vfs *pVfs, int a, char *b){ + UNUSED_PARAMETER(pVfs); + UNUSED_PARAMETER(a); + UNUSED_PARAMETER(b); return 0; } @@ -223819,6 +225651,7 @@ static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ pIdxInfo->orderByConsumed = 1; pIdxInfo->idxNum |= 0x08; } + pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_HEX; return SQLITE_OK; } @@ -224476,7 +226309,13 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){ return SQLITE_OK; } ** ** The data field of sqlite_dbpage table can be updated. The new ** value must be a BLOB which is the correct page size, otherwise the -** update fails. Rows may not be deleted or inserted. +** update fails. 
INSERT operations also work, and operate as if they +** were REPLACE. The size of the database can be extended by INSERT-ing +** new pages on the end. +** +** Rows may not be deleted. However, doing an INSERT to page number N +** with NULL page data causes the N-th page and all subsequent pages to be +** deleted and the database to be truncated. */ /* #include "sqliteInt.h" ** Requires access to internal data structures ** */ @@ -224499,6 +226338,8 @@ struct DbpageCursor { struct DbpageTable { sqlite3_vtab base; /* Base class. Must be first */ sqlite3 *db; /* The database */ + int iDbTrunc; /* Database to truncate */ + Pgno pgnoTrunc; /* Size to truncate to */ }; /* Columns */ @@ -224507,7 +226348,6 @@ struct DbpageTable { #define DBPAGE_COLUMN_SCHEMA 2 - /* ** Connect to or create a dbpagevfs virtual table. */ @@ -224758,6 +226598,24 @@ static int dbpageRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ return SQLITE_OK; } +/* +** Open write transactions. Since we do not know in advance which database +** files will be written by the sqlite_dbpage virtual table, start a write +** transaction on them all. +** +** Return SQLITE_OK if successful, or an SQLite error code otherwise. +*/ +static int dbpageBeginTrans(DbpageTable *pTab){ + sqlite3 *db = pTab->db; + int rc = SQLITE_OK; + int i; + for(i=0; rc==SQLITE_OK && i<db->nDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ) rc = sqlite3BtreeBeginTrans(pBt, 1, 0); + } + return rc; +} + static int dbpageUpdate( sqlite3_vtab *pVtab, int argc, @@ -224769,11 +226627,11 @@ static int dbpageUpdate( DbPage *pDbPage = 0; int rc = SQLITE_OK; char *zErr = 0; - const char *zSchema; int iDb; Btree *pBt; Pager *pPager; int szPage; + int isInsert; (void)pRowid; if( pTab->db->flags & SQLITE_Defensive ){ @@ -224784,21 +226642,29 @@ static int dbpageUpdate( zErr = "cannot delete"; goto update_fail; } - pgno = sqlite3_value_int(argv[0]); - if( sqlite3_value_type(argv[0])==SQLITE_NULL - || (Pgno)sqlite3_value_int(argv[1])!=pgno - ){ - zErr = "cannot insert"; - goto update_fail; + if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ + pgno = (Pgno)sqlite3_value_int(argv[2]); + isInsert = 1; + }else{ + pgno = sqlite3_value_int(argv[0]); + if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){ + zErr = "cannot insert"; + goto update_fail; + } + isInsert = 0; } - zSchema = (const char*)sqlite3_value_text(argv[4]); - iDb = ALWAYS(zSchema) ? sqlite3FindDbName(pTab->db, zSchema) : -1; - if( NEVER(iDb<0) ){ - zErr = "no such schema"; - goto update_fail; + if( sqlite3_value_type(argv[4])==SQLITE_NULL ){ + iDb = 0; + }else{ + const char *zSchema = (const char*)sqlite3_value_text(argv[4]); + iDb = sqlite3FindDbName(pTab->db, zSchema); + if( iDb<0 ){ + zErr = "no such schema"; + goto update_fail; + } } pBt = pTab->db->aDb[iDb].pBt; - if( NEVER(pgno<1) || NEVER(pBt==0) || NEVER(pgno>sqlite3BtreeLastPage(pBt)) ){ + if( pgno<1 || NEVER(pBt==0) ){ zErr = "bad page number"; goto update_fail; } @@ -224806,51 +226672,83 @@ static int dbpageUpdate( if( sqlite3_value_type(argv[3])!=SQLITE_BLOB || sqlite3_value_bytes(argv[3])!=szPage ){ - zErr = "bad page value"; + if( sqlite3_value_type(argv[3])==SQLITE_NULL && isInsert && pgno>1 ){ + /* "INSERT INTO dbpage($PGNO,NULL)" causes page number $PGNO and + ** all subsequent pages to be deleted.
*/ + pTab->iDbTrunc = iDb; + pgno--; + pTab->pgnoTrunc = pgno; + }else{ + zErr = "bad page value"; + goto update_fail; + } + } + + if( dbpageBeginTrans(pTab)!=SQLITE_OK ){ + zErr = "failed to open transaction"; goto update_fail; } + pPager = sqlite3BtreePager(pBt); rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pDbPage, 0); if( rc==SQLITE_OK ){ const void *pData = sqlite3_value_blob(argv[3]); - assert( pData!=0 || pTab->db->mallocFailed ); - if( pData - && (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK - ){ - memcpy(sqlite3PagerGetData(pDbPage), pData, szPage); + if( (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK && pData ){ + unsigned char *aPage = sqlite3PagerGetData(pDbPage); + memcpy(aPage, pData, szPage); + pTab->pgnoTrunc = 0; } + }else{ + pTab->pgnoTrunc = 0; } sqlite3PagerUnref(pDbPage); return rc; update_fail: + pTab->pgnoTrunc = 0; sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = sqlite3_mprintf("%s", zErr); return SQLITE_ERROR; } -/* Since we do not know in advance which database files will be -** written by the sqlite_dbpage virtual table, start a write transaction -** on them all. -*/ static int dbpageBegin(sqlite3_vtab *pVtab){ DbpageTable *pTab = (DbpageTable *)pVtab; - sqlite3 *db = pTab->db; - int i; - for(i=0; i<db->nDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ) (void)sqlite3BtreeBeginTrans(pBt, 1, 0); + pTab->pgnoTrunc = 0; + return SQLITE_OK; +} + +/* Invoke sqlite3PagerTruncate() as necessary, just prior to COMMIT +*/ +static int dbpageSync(sqlite3_vtab *pVtab){ + DbpageTable *pTab = (DbpageTable *)pVtab; + if( pTab->pgnoTrunc>0 ){ + Btree *pBt = pTab->db->aDb[pTab->iDbTrunc].pBt; + Pager *pPager = sqlite3BtreePager(pBt); + sqlite3BtreeEnter(pBt); + if( pTab->pgnoTrunc<sqlite3BtreeLastPage(pBt) ){ + sqlite3PagerTruncateImage(pPager, pTab->pgnoTrunc); + } + sqlite3BtreeLeave(pBt); } + pTab->pgnoTrunc = 0; return SQLITE_OK; } +/* Cancel any pending truncate. +*/ +static int dbpageRollbackTo(sqlite3_vtab *pVtab, int notUsed1){ + DbpageTable *pTab = (DbpageTable *)pVtab; + pTab->pgnoTrunc = 0; + (void)notUsed1; + return SQLITE_OK; +} /* ** Invoke this routine to register the "dbpage" virtual table module */ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ static sqlite3_module dbpage_module = { - 0, /* iVersion */ + 2, /* iVersion */ dbpageConnect, /* xCreate */ dbpageConnect, /* xConnect */ dbpageBestIndex, /* xBestIndex */ @@ -224865,14 +226763,14 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ dbpageRowid, /* xRowid - read data */ dbpageUpdate, /* xUpdate */ dbpageBegin, /* xBegin */ - 0, /* xSync */ + dbpageSync, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindMethod */ 0, /* xRename */ 0, /* xSavepoint */ 0, /* xRelease */ - 0, /* xRollbackTo */ + dbpageRollbackTo, /* xRollbackTo */ 0, /* xShadowName */ 0 /* xIntegrity */ }; @@ -224960,6 +226858,10 @@ struct SessionBuffer { ** input data. Input data may be supplied either as a single large buffer ** (e.g. sqlite3changeset_start()) or using a stream function (e.g. ** sqlite3changeset_start_strm()). +** +** bNoDiscard: +** If true, then the only time data is discarded is as a result of explicit +** sessionDiscardData() calls. Not within every sessionInputBuffer() call.
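The dbpageUpdate()/dbpageSync() changes above give sqlite_dbpage REPLACE-style INSERT semantics, and an INSERT of NULL page data at page N schedules a truncation that dbpageSync() performs just before commit. A hedged usage sketch (assumes a build with SQLITE_ENABLE_DBPAGE_VTAB and a connection that is not in defensive mode):

  #include "sqlite3.h"

  /* Delete page 5 and everything after it, leaving "main" four pages
  ** long; the actual truncation happens at commit, in dbpageSync(). */
  static int truncate_to_four_pages(sqlite3 *db){
    return sqlite3_exec(db,
        "INSERT INTO sqlite_dbpage(pgno,data) VALUES(5,NULL)", 0, 0, 0);
  }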
*/ struct SessionInput { int bNoDiscard; /* If true, do not discard in InputBuffer() */ @@ -225021,11 +226923,13 @@ struct sqlite3_changeset_iter { struct SessionTable { SessionTable *pNext; char *zName; /* Local name of table */ - int nCol; /* Number of columns in table zName */ + int nCol; /* Number of non-hidden columns */ + int nTotalCol; /* Number of columns including hidden */ int bStat1; /* True if this is sqlite_stat1 */ int bRowid; /* True if this table uses rowid for PK */ const char **azCol; /* Column names */ const char **azDflt; /* Default value expressions */ + int *aiIdx; /* Index to pass to xNew/xOld */ u8 *abPK; /* Array of primary key flags */ int nEntry; /* Total number of entries in hash table */ int nChange; /* Size of apChange[] array */ @@ -225428,22 +227332,22 @@ static int sessionPreupdateHash( unsigned int h = 0; /* Hash value to return */ int i; /* Used to iterate through columns */ + assert( pTab->nTotalCol==pSession->hook.xCount(pSession->hook.pCtx) ); if( pTab->bRowid ){ - assert( pTab->nCol-1==pSession->hook.xCount(pSession->hook.pCtx) ); h = sessionHashAppendI64(h, iRowid); }else{ assert( *pbNullPK==0 ); - assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); for(i=0; i<pTab->nCol; i++){ if( pTab->abPK[i] ){ int rc; int eType; sqlite3_value *pVal; + int iIdx = pTab->aiIdx[i]; if( bNew ){ - rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); + rc = pSession->hook.xNew(pSession->hook.pCtx, iIdx, &pVal); }else{ - rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &pVal); } if( rc!=SQLITE_OK ) return rc; @@ -225780,6 +227684,7 @@ static int sessionPreupdateEqual( sqlite3_value *pVal; /* Value returned by preupdate_new/old */ int rc; /* Error code from preupdate_new/old */ int eType = *a++; /* Type of value from change record */ + int iIdx = pTab->aiIdx[iCol]; /* The following calls to preupdate_new() and preupdate_old() can not ** fail. This is because they cache their return values, and by the @@ -225788,10 +227693,10 @@ static int sessionPreupdateEqual( ** this (that the method has already been called). */ if( op==SQLITE_INSERT ){ /* assert( db->pPreUpdate->pNewUnpacked || db->pPreUpdate->aNew ); */ - rc = pSession->hook.xNew(pSession->hook.pCtx, iCol, &pVal); + rc = pSession->hook.xNew(pSession->hook.pCtx, iIdx, &pVal); }else{ /* assert( db->pPreUpdate->pUnpacked ); */ - rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal); + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &pVal); } assert( rc==SQLITE_OK ); (void)rc; /* Suppress warning about unused variable */ @@ -225916,9 +227821,11 @@ static int sessionTableInfo( const char *zDb, /* Name of attached database (e.g.
"main") */ const char *zThis, /* Table name */ int *pnCol, /* OUT: number of columns */ + int *pnTotalCol, /* OUT: number of hidden columns */ const char **pzTab, /* OUT: Copy of zThis */ const char ***pazCol, /* OUT: Array of column names for table */ const char ***pazDflt, /* OUT: Array of default value expressions */ + int **paiIdx, /* OUT: Array of xNew/xOld indexes */ u8 **pabPK, /* OUT: Array of booleans - true for PK col */ int *pbRowid /* OUT: True if only PK is a rowid */ ){ @@ -225933,6 +227840,7 @@ static int sessionTableInfo( char **azCol = 0; char **azDflt = 0; u8 *abPK = 0; + int *aiIdx = 0; int bRowid = 0; /* Set to true to use rowid as PK */ assert( pazCol && pabPK ); @@ -225940,6 +227848,8 @@ static int sessionTableInfo( *pazCol = 0; *pabPK = 0; *pnCol = 0; + if( pnTotalCol ) *pnTotalCol = 0; + if( paiIdx ) *paiIdx = 0; if( pzTab ) *pzTab = 0; if( pazDflt ) *pazDflt = 0; @@ -225949,9 +227859,9 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ /* For sqlite_stat1, pretend that (tbl,idx) is the PRIMARY KEY. */ zPragma = sqlite3_mprintf( - "SELECT 0, 'tbl', '', 0, '', 1 UNION ALL " - "SELECT 1, 'idx', '', 0, '', 2 UNION ALL " - "SELECT 2, 'stat', '', 0, '', 0" + "SELECT 0, 'tbl', '', 0, '', 1, 0 UNION ALL " + "SELECT 1, 'idx', '', 0, '', 2, 0 UNION ALL " + "SELECT 2, 'stat', '', 0, '', 0, 0" ); }else if( rc==SQLITE_ERROR ){ zPragma = sqlite3_mprintf(""); @@ -225959,7 +227869,7 @@ static int sessionTableInfo( return rc; } }else{ - zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis); + zPragma = sqlite3_mprintf("PRAGMA '%q'.table_xinfo('%q')", zDb, zThis); } if( !zPragma ){ return SQLITE_NOMEM; @@ -225976,7 +227886,9 @@ static int sessionTableInfo( while( SQLITE_ROW==sqlite3_step(pStmt) ){ nByte += sqlite3_column_bytes(pStmt, 1); /* name */ nByte += sqlite3_column_bytes(pStmt, 4); /* dflt_value */ - nDbCol++; + if( sqlite3_column_int(pStmt, 6)==0 ){ /* !hidden */ + nDbCol++; + } if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; /* pk */ } if( nDbCol==0 ) bRowid = 0; @@ -225985,7 +227897,7 @@ static int sessionTableInfo( rc = sqlite3_reset(pStmt); if( rc==SQLITE_OK ){ - nByte += nDbCol * (sizeof(const char *)*2 + sizeof(u8) + 1 + 1); + nByte += nDbCol * (sizeof(const char *)*2 +sizeof(int)+sizeof(u8) + 1 + 1); pAlloc = sessionMalloc64(pSession, nByte); if( pAlloc==0 ){ rc = SQLITE_NOMEM; @@ -225996,8 +227908,8 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ azCol = (char **)pAlloc; azDflt = (char**)&azCol[nDbCol]; - pAlloc = (u8 *)&azDflt[nDbCol]; - abPK = (u8 *)pAlloc; + aiIdx = (int*)&azDflt[nDbCol]; + abPK = (u8 *)&aiIdx[nDbCol]; pAlloc = &abPK[nDbCol]; if( pzTab ){ memcpy(pAlloc, zThis, nThis+1); @@ -226012,27 +227924,32 @@ static int sessionTableInfo( azCol[i] = (char*)pAlloc; pAlloc += nName+1; abPK[i] = 1; + aiIdx[i] = -1; i++; } while( SQLITE_ROW==sqlite3_step(pStmt) ){ - int nName = sqlite3_column_bytes(pStmt, 1); - int nDflt = sqlite3_column_bytes(pStmt, 4); - const unsigned char *zName = sqlite3_column_text(pStmt, 1); - const unsigned char *zDflt = sqlite3_column_text(pStmt, 4); - - if( zName==0 ) break; - memcpy(pAlloc, zName, nName+1); - azCol[i] = (char *)pAlloc; - pAlloc += nName+1; - if( zDflt ){ - memcpy(pAlloc, zDflt, nDflt+1); - azDflt[i] = (char *)pAlloc; - pAlloc += nDflt+1; - }else{ - azDflt[i] = 0; + if( sqlite3_column_int(pStmt, 6)==0 ){ /* !hidden */ + int nName = sqlite3_column_bytes(pStmt, 1); + int nDflt = sqlite3_column_bytes(pStmt, 4); + const unsigned char *zName = sqlite3_column_text(pStmt, 1); + const unsigned char *zDflt = 
sqlite3_column_text(pStmt, 4); + + if( zName==0 ) break; + memcpy(pAlloc, zName, nName+1); + azCol[i] = (char *)pAlloc; + pAlloc += nName+1; + if( zDflt ){ + memcpy(pAlloc, zDflt, nDflt+1); + azDflt[i] = (char *)pAlloc; + pAlloc += nDflt+1; + }else{ + azDflt[i] = 0; + } + abPK[i] = sqlite3_column_int(pStmt, 5); + aiIdx[i] = sqlite3_column_int(pStmt, 0); + i++; } - abPK[i] = sqlite3_column_int(pStmt, 5); - i++; + if( pnTotalCol ) (*pnTotalCol)++; } rc = sqlite3_reset(pStmt); } @@ -226045,6 +227962,7 @@ if( pazDflt ) *pazDflt = (const char**)azDflt; *pabPK = abPK; *pnCol = nDbCol; + if( paiIdx ) *paiIdx = aiIdx; }else{ sessionFree(pSession, azCol); } @@ -226076,7 +227994,8 @@ static int sessionInitTable( u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); rc = sessionTableInfo(pSession, db, zDb, - pTab->zName, &pTab->nCol, 0, &pTab->azCol, &pTab->azDflt, &abPK, + pTab->zName, &pTab->nCol, &pTab->nTotalCol, 0, &pTab->azCol, + &pTab->azDflt, &pTab->aiIdx, &abPK, ((pSession==0 || pSession->bImplicitPK) ? &pTab->bRowid : 0) ); if( rc==SQLITE_OK ){ @@ -226111,15 +228030,17 @@ */ static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ int nCol = 0; + int nTotalCol = 0; const char **azCol = 0; const char **azDflt = 0; + int *aiIdx = 0; u8 *abPK = 0; int bRowid = 0; assert( pSession->rc==SQLITE_OK ); pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, - pTab->zName, &nCol, 0, &azCol, &azDflt, &abPK, + pTab->zName, &nCol, &nTotalCol, 0, &azCol, &azDflt, &aiIdx, &abPK, (pSession->bImplicitPK ? &bRowid : 0) ); if( pSession->rc==SQLITE_OK ){ @@ -226142,8 +228063,10 @@ static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ const char **a = pTab->azCol; pTab->azCol = azCol; pTab->nCol = nCol; + pTab->nTotalCol = nTotalCol; pTab->azDflt = azDflt; pTab->abPK = abPK; + pTab->aiIdx = aiIdx; azCol = a; } if( pSession->bEnableSize ){ @@ -226461,7 +228384,7 @@ static int sessionUpdateMaxSize( int ii; for(ii=0; ii<pTab->nCol; ii++){ sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + pSession->hook.xNew(pSession->hook.pCtx, pTab->aiIdx[ii], &p); sessionSerializeValue(0, p, &nNew); } } @@ -226481,8 +228404,9 @@ static int sessionUpdateMaxSize( int bChanged = 1; int nOld = 0; int eType; + int iIdx = pTab->aiIdx[ii]; sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii-pTab->bRowid, &p); + pSession->hook.xNew(pSession->hook.pCtx, iIdx, &p); if( p==0 ){ return SQLITE_NOMEM; } @@ -226579,11 +228503,11 @@ static void sessionPreupdateOneChange( /* Check the number of columns in this xPreUpdate call matches the ** number of columns in the table. */ nExpect = pSession->hook.xCount(pSession->hook.pCtx); - if( (pTab->nCol-pTab->bRowid)<nExpect ){ + if( pTab->nTotalCol<nExpect ){ if( sessionReinitTable(pSession, pTab) ) return; } - if( (pTab->nCol-pTab->bRowid)!=nExpect ){ + if( pTab->nTotalCol!=nExpect ){ pSession->rc = SQLITE_SCHEMA; return; } @@ -226640,19 +228564,23 @@ static void sessionPreupdateOneChange( /* Figure out how large an allocation is required */ nByte = sizeof(SessionChange); - for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ + for(i=pTab->bRowid; i<pTab->nCol; i++){ + int iIdx = pTab->aiIdx[i]; sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ - TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p); - assert( trc==SQLITE_OK ); + /* This may fail if the column has a non-NULL default and was added + ** using ALTER TABLE ADD COLUMN after this record was created.
*/ + rc = pSession->hook.xOld(pSession->hook.pCtx, iIdx, &p); }else if( pTab->abPK[i] ){ - TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx, i, &p); + TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx,iIdx,&p); assert( trc==SQLITE_OK ); } - /* This may fail if SQLite value p contains a utf-16 string that must - ** be converted to utf-8 and an OOM error occurs while doing so. */ - rc = sessionSerializeValue(0, p, &nByte); + if( rc==SQLITE_OK ){ + /* This may fail if SQLite value p contains a utf-16 string that must + ** be converted to utf-8 and an OOM error occurs while doing so. */ + rc = sessionSerializeValue(0, p, &nByte); + } if( rc!=SQLITE_OK ) goto error_out; } if( pTab->bRowid ){ @@ -226679,12 +228607,13 @@ static void sessionPreupdateOneChange( sessionPutI64(&pC->aRecord[1], iRowid); nByte = 9; } - for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ + for(i=pTab->bRowid; inCol; i++){ sqlite3_value *p = 0; + int iIdx = pTab->aiIdx[i]; if( op!=SQLITE_INSERT ){ - pSession->hook.xOld(pSession->hook.pCtx, i, &p); + pSession->hook.xOld(pSession->hook.pCtx, iIdx, &p); }else if( pTab->abPK[i] ){ - pSession->hook.xNew(pSession->hook.pCtx, i, &p); + pSession->hook.xNew(pSession->hook.pCtx, iIdx, &p); } sessionSerializeValue(&pC->aRecord[nByte], p, &nByte); } @@ -227086,7 +229015,8 @@ SQLITE_API int sqlite3session_diff( int bRowid = 0; u8 *abPK; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, 0, &abPK, + rc = sessionTableInfo(0, db, zFrom, zTbl, + &nCol, 0, 0, &azCol, 0, 0, &abPK, pSession->bImplicitPK ? &bRowid : 0 ); if( rc==SQLITE_OK ){ @@ -227410,9 +229340,11 @@ static void sessionAppendIdent( char *zOut = (char *)&p->aBuf[p->nBuf]; const char *zIn = zStr; *zOut++ = '"'; - while( *zIn ){ - if( *zIn=='"' ) *zOut++ = '"'; - *zOut++ = *(zIn++); + if( zIn!=0 ){ + while( *zIn ){ + if( *zIn=='"' ) *zOut++ = '"'; + *zOut++ = *(zIn++); + } } *zOut++ = '"'; p->nBuf = (int)((u8 *)zOut - p->aBuf); @@ -227663,10 +229595,10 @@ static int sessionSelectStmt( int rc = SQLITE_OK; char *zSql = 0; const char *zSep = ""; - const char *zCols = bRowid ? 
SESSIONS_ROWID ", *" : "*"; int nSql = -1; int i; + SessionBuffer cols = {0, 0, 0}; SessionBuffer nooptest = {0, 0, 0}; SessionBuffer pkfield = {0, 0, 0}; SessionBuffer pkvar = {0, 0, 0}; @@ -227679,9 +229611,16 @@ static int sessionSelectStmt( sessionAppendStr(&pkvar, "?1, (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", &rc ); - zCols = "tbl, ?2, stat"; + sessionAppendStr(&cols, "tbl, ?2, stat", &rc); }else{ + #if 0 + if( bRowid ){ + sessionAppendStr(&cols, SESSIONS_ROWID, &rc); + } + #endif for(i=0; iflags & SQLITE_FkNoAction; assert( xConflict!=0 ); + sqlite3_mutex_enter(sqlite3_db_mutex(db)); + if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ + db->flags |= ((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } + pIter->in.bNoDiscard = 1; memset(&sApply, 0, sizeof(sApply)); sApply.bRebase = (ppRebase && pnRebase); sApply.bInvertConstraints = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); sApply.bIgnoreNoop = !!(flags & SQLITE_CHANGESETAPPLY_IGNORENOOP); - sqlite3_mutex_enter(sqlite3_db_mutex(db)); if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){ rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0); } @@ -230076,7 +232022,8 @@ static int sessionChangesetApply( sqlite3changeset_pk(pIter, &abPK, 0); rc = sessionTableInfo(0, db, "main", zNew, - &sApply.nCol, &zTab, &sApply.azCol, 0, &sApply.abPK, &sApply.bRowid + &sApply.nCol, 0, &zTab, &sApply.azCol, 0, 0, + &sApply.abPK, &sApply.bRowid ); if( rc!=SQLITE_OK ) break; for(i=0; iflags & SQLITE_FkNoAction ); + db->flags &= ~((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } sqlite3_mutex_leave(sqlite3_db_mutex(db)); return rc; } @@ -230208,12 +232166,6 @@ SQLITE_API int sqlite3changeset_apply_v2( sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1); - u64 savedFlag = db->flags & SQLITE_FkNoAction; - - if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ - db->flags |= ((u64)SQLITE_FkNoAction); - db->aDb[0].pSchema->schema_cookie -= 32; - } if( rc==SQLITE_OK ){ rc = sessionChangesetApply( @@ -230221,11 +232173,6 @@ SQLITE_API int sqlite3changeset_apply_v2( ); } - if( (flags & SQLITE_CHANGESETAPPLY_FKNOACTION) && savedFlag==0 ){ - assert( db->flags & SQLITE_FkNoAction ); - db->flags &= ~((u64)SQLITE_FkNoAction); - db->aDb[0].pSchema->schema_cookie -= 32; - } return rc; } @@ -230546,6 +232493,9 @@ static int sessionChangesetExtendRecord( sessionAppendBlob(pOut, aRec, nRec, &rc); if( rc==SQLITE_OK && pTab->pDfltStmt==0 ){ rc = sessionPrepareDfltStmt(pGrp->db, pTab, &pTab->pDfltStmt); + if( rc==SQLITE_OK && SQLITE_ROW!=sqlite3_step(pTab->pDfltStmt) ){ + rc = sqlite3_errcode(pGrp->db); + } } for(ii=nCol; rc==SQLITE_OK && iinCol; ii++){ int eType = sqlite3_column_type(pTab->pDfltStmt, ii); @@ -230562,6 +232512,7 @@ static int sessionChangesetExtendRecord( } if( SQLITE_OK==sessionBufferGrow(pOut, 8, &rc) ){ sessionPutI64(&pOut->aBuf[pOut->nBuf], iVal); + pOut->nBuf += 8; } break; } @@ -230701,6 +232652,8 @@ static int sessionOneChangeToHash( u8 *aRec = &pIter->in.aData[pIter->in.iCurrent + 2]; int nRec = (pIter->in.iNext - pIter->in.iCurrent) - 2; + assert( nRec>0 ); + /* Ensure that only changesets, or only patchsets, but not a mixture ** of both, are being combined. It is an error to try to combine a ** changeset and a patchset. 
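  ** As a minimal sketch of what this rules out, assuming the documented
  ** changegroup API (buffers pA/nA and pB/nB are hypothetical):
  **
  **   sqlite3_changegroup *pGrp = 0;
  **   int rc = sqlite3changegroup_new(&pGrp);
  **   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
  **   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
  **   if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, &nOut, &pOut);
  **   sqlite3changegroup_delete(pGrp);
  **
  ** If pA/nA holds a changeset and pB/nB a patchset (or vice versa),
  ** the second sqlite3changegroup_add() call is rejected.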
*/ @@ -230778,6 +232731,7 @@ static int sessionChangesetToHash( int nRec; int rc = SQLITE_OK; + pIter->in.bNoDiscard = 1; while( SQLITE_ROW==(sessionChangesetNext(pIter, &aRec, &nRec, 0)) ){ rc = sessionOneChangeToHash(pGrp, pIter, bRebase); if( rc!=SQLITE_OK ) break; @@ -231409,7 +233363,27 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ /************** End of sqlite3session.c **************************************/ /************** Begin file fts5.c ********************************************/ - +/* +** This, the "fts5.c" source file, is a composite file that is itself +** assembled from the following files: +** +** fts5.h +** fts5Int.h +** fts5parse.h <--- Generated from fts5parse.y by Lemon +** fts5parse.c <--- Generated from fts5parse.y by Lemon +** fts5_aux.c +** fts5_buffer.c +** fts5_config.c +** fts5_expr.c +** fts5_hash.c +** fts5_index.c +** fts5_main.c +** fts5_storage.c +** fts5_tokenize.c +** fts5_unicode2.c +** fts5_varint.c +** fts5_vocab.c +*/ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) @@ -231419,6 +233393,12 @@ SQLITE_API int sqlite3session_config(int op, void *pArg){ # undef NDEBUG #endif +#ifdef HAVE_STDINT_H +/* #include */ +#endif +#ifdef HAVE_INTTYPES_H +/* #include */ +#endif /* ** 2014 May 31 ** @@ -231659,6 +233639,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -231715,19 +233699,57 @@ struct Fts5PhraseIter { ** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, ** output variable (*ppToken) is set to point to a buffer containing the ** matching document token, and (*pnToken) to the size of that buffer in -** bytes. This API is not available if the specified token matches a -** prefix query term. In that case both output variables are always set -** to 0. +** bytes. ** ** The output text is not a copy of the document text that was tokenized. ** It is the output of the tokenizer module. For tokendata=1 tables, this ** includes any embedded 0x00 and trailing data. ** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. +** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. 
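** As a hedged example of the per-table option mentioned above, the
** 'insttoken' flag uses the usual FTS5 configuration idiom (the table
** name "ft" here is illustrative only):
**
**   INSERT INTO ft(ft, rank) VALUES('insttoken', 1);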
+** +** xColumnLocale(pFts5, iIdx, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. (*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -231769,6 +233791,15 @@ struct Fts5ExtensionApi { const char **ppToken, int *pnToken ); int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -231789,7 +233820,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -231813,7 +233844,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate(). ** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. This is always one of the following ** four values: ** @@ -231837,6 +233868,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). 
The third and fourth @@ -231860,6 +233898,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. +** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**
+** <ul><li> There is no "iVersion" field, and
+**     <li> The xTokenize() method does not take a locale argument.
+** </ul>
+** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -231968,6 +234030,33 @@ struct Fts5ExtensionApi { ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. +*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -231987,6 +234076,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -232006,7 +234096,7 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( @@ -232033,6 +234123,25 @@ struct fts5_api { fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* @@ -232106,6 +234215,22 @@ typedef sqlite3_uint64 u64; # define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) # define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) +/* The uptr type is an unsigned integer large enough to hold a pointer +*/ +#if defined(HAVE_STDINT_H) + typedef uintptr_t uptr; +#elif SQLITE_PTRSIZE==4 + typedef u32 uptr; +#else + typedef u64 uptr; +#endif + +#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0) +#else +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) +#endif + #endif /* Truncate very long tokens to this many bytes. 
Hard limit is @@ -232189,6 +234314,18 @@ struct Fts5Colset { */ typedef struct Fts5Config Fts5Config; +typedef struct Fts5TokenizerConfig Fts5TokenizerConfig; + +struct Fts5TokenizerConfig { + Fts5Tokenizer *pTok; + fts5_tokenizer_v2 *pApi2; + fts5_tokenizer *pApi1; + const char **azArg; + int nArg; + int ePattern; /* FTS_PATTERN_XXX constant */ + const char *pLocale; /* Current locale to use */ + int nLocale; /* Size of pLocale in bytes */ +}; /* ** An instance of the following structure encodes all information that can @@ -232228,9 +234365,12 @@ typedef struct Fts5Config Fts5Config; ** ** INSERT INTO tbl(tbl, rank) VALUES('prefix-index', $bPrefixIndex); ** +** bLocale: +** Set to true if locale=1 was specified when the table was created. */ struct Fts5Config { sqlite3 *db; /* Database handle */ + Fts5Global *pGlobal; /* Global fts5 object for handle db */ char *zDb; /* Database holding FTS index (e.g. "main") */ char *zName; /* Name of FTS index */ int nCol; /* Number of columns */ @@ -232240,16 +234380,17 @@ struct Fts5Config { int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */ int eContent; /* An FTS5_CONTENT value */ int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */ + int bContentlessUnindexed; /* "contentless_unindexed=" option (dflt=0) */ char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */ int bTokendata; /* "tokendata=" option value (dflt==0) */ + int bLocale; /* "locale=" option value (dflt==0) */ int eDetail; /* FTS5_DETAIL_XXX value */ char *zContentExprlist; - Fts5Tokenizer *pTok; - fts5_tokenizer *pTokApi; + Fts5TokenizerConfig t; int bLock; /* True when table is preparing statement */ - int ePattern; /* FTS_PATTERN_XXX constant */ + /* Values loaded from the %_config table */ int iVersion; /* fts5 file format 'version' */ @@ -232262,7 +234403,8 @@ struct Fts5Config { char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ int bSecureDelete; /* 'secure-delete' */ - int nDeleteMerge; /* 'deletemerge' */ + int nDeleteMerge; /* 'deletemerge' */ + int bPrefixInsttoken; /* 'prefix-insttoken' */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg; @@ -232278,9 +234420,10 @@ struct Fts5Config { #define FTS5_CURRENT_VERSION 4 #define FTS5_CURRENT_VERSION_SECUREDELETE 5 -#define FTS5_CONTENT_NORMAL 0 -#define FTS5_CONTENT_NONE 1 -#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_NORMAL 0 +#define FTS5_CONTENT_NONE 1 +#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_UNINDEXED 3 #define FTS5_DETAIL_FULL 0 #define FTS5_DETAIL_NONE 1 @@ -232315,6 +234458,8 @@ static int sqlite3Fts5ConfigSetValue(Fts5Config*, const char*, sqlite3_value*, i static int sqlite3Fts5ConfigParseRank(const char*, char**, char**); +static void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...); + /* ** End of interface to code in fts5_config.c. 
**************************************************************************/ @@ -232359,7 +234504,7 @@ static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...); static void sqlite3Fts5Put32(u8*, int); static int sqlite3Fts5Get32(const u8*); -#define FTS5_POS2COLUMN(iPos) (int)(iPos >> 32) +#define FTS5_POS2COLUMN(iPos) (int)((iPos >> 32) & 0x7FFFFFFF) #define FTS5_POS2OFFSET(iPos) (int)(iPos & 0x7FFFFFFF) typedef struct Fts5PoslistReader Fts5PoslistReader; @@ -232516,7 +234661,14 @@ static int sqlite3Fts5StructureTest(Fts5Index*, void*); /* ** Used by xInstToken(): */ -static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*); +static int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +); /* ** Insert or remove data to or from the index. Each time a document is @@ -232644,18 +234796,20 @@ struct Fts5Table { Fts5Index *pIndex; /* Full-text index */ }; -static int sqlite3Fts5GetTokenizer( - Fts5Global*, - const char **azArg, - int nArg, - Fts5Config*, - char **pzErr -); +static int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig); static Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64); static int sqlite3Fts5FlushToDisk(Fts5Table*); +static void sqlite3Fts5ClearLocale(Fts5Config *pConfig); +static void sqlite3Fts5SetLocale(Fts5Config *pConfig, const char *pLoc, int nLoc); + +static int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal); +static int sqlite3Fts5DecodeLocaleValue(sqlite3_value *pVal, + const char **ppText, int *pnText, const char **ppLoc, int *pnLoc +); + /* ** End of interface to code in fts5.c. **************************************************************************/ @@ -232735,8 +234889,8 @@ static int sqlite3Fts5StorageRename(Fts5Storage*, const char *zName); static int sqlite3Fts5DropAll(Fts5Config*); static int sqlite3Fts5CreateTable(Fts5Config*, const char*, const char*, int, char **); -static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**); -static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, sqlite3_value**, i64*); +static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**, int); +static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, int, sqlite3_value**, i64*); static int sqlite3Fts5StorageIndexInsert(Fts5Storage *p, sqlite3_value**, i64); static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg); @@ -232761,6 +234915,9 @@ static int sqlite3Fts5StorageOptimize(Fts5Storage *p); static int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge); static int sqlite3Fts5StorageReset(Fts5Storage *p); +static void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage*); +static int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel); + /* ** End of interface to code in fts5_storage.c. **************************************************************************/ @@ -232913,6 +235070,7 @@ static int sqlite3Fts5TokenizerPattern( int (*xCreate)(void*, const char**, int, Fts5Tokenizer**), Fts5Tokenizer *pTok ); +static int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig*); /* ** End of interface to code in fts5_tokenizer.c. **************************************************************************/ @@ -234690,6 +236848,7 @@ static int fts5HighlightCb( return rc; } + /* ** Implementation of highlight() function. 
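** highlight() is normally invoked as an auxiliary function, e.g. (the
** table name "ft" is illustrative only):
**
**   SELECT highlight(ft, 0, '<b>', '</b>') FROM ft WHERE ft MATCH 'xyz';
**
** With this patch the column text is tokenized using the locale of the
** column being highlighted, obtained via xColumnLocale() and passed to
** xTokenize_v2().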
*/ @@ -234720,12 +236879,19 @@ static void fts5HighlightFunction( sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC); rc = SQLITE_OK; }else if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iCol */ + int nLoc = 0; /* Size of pLoc in bytes */ if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter); } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx, fts5HighlightCb + ); } if( ctx.bOpen ){ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); @@ -234922,6 +237088,8 @@ static void fts5SnippetFunction( memset(&sFinder, 0, sizeof(Fts5SFinder)); for(i=0; ixColumnText(pFts, i, &sFinder.zDoc, &nDoc); if( rc!=SQLITE_OK ) break; - rc = pApi->xTokenize(pFts, - sFinder.zDoc, nDoc, (void*)&sFinder,fts5SentenceFinderCb + rc = pApi->xColumnLocale(pFts, i, &pLoc, &nLoc); + if( rc!=SQLITE_OK ) break; + rc = pApi->xTokenize_v2(pFts, + sFinder.zDoc, nDoc, pLoc, nLoc, (void*)&sFinder, fts5SentenceFinderCb ); if( rc!=SQLITE_OK ) break; rc = pApi->xColumnSize(pFts, i, &nDocsize); @@ -234988,6 +237158,9 @@ static void fts5SnippetFunction( rc = pApi->xColumnSize(pFts, iBestCol, &nColSize); } if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iBestCol */ + int nLoc = 0; /* Bytes in pLoc */ + if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iBestCol, &ctx.iter); } @@ -235006,7 +237179,12 @@ static void fts5SnippetFunction( } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iBestCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx,fts5HighlightCb + ); } if( ctx.bOpen ){ fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); @@ -235190,6 +237368,53 @@ static void fts5Bm25Function( } } +/* +** Implementation of fts5_get_locale() function. 
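+** fts5_get_locale() takes a single column-index argument and returns
+** the locale associated with that column of the current row, or NULL
+** if no locale was set with fts5_locale(). For example (the table name
+** "ft" is illustrative only):
+**
+**   SELECT fts5_get_locale(ft, 0) FROM ft WHERE ft MATCH 'xyz';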
+*/ +static void fts5GetLocaleFunction( + const Fts5ExtensionApi *pApi, /* API offered by current FTS version */ + Fts5Context *pFts, /* First arg to pass to pApi functions */ + sqlite3_context *pCtx, /* Context for returning result/error */ + int nVal, /* Number of values in apVal[] array */ + sqlite3_value **apVal /* Array of trailing arguments */ +){ + int iCol = 0; + int eType = 0; + int rc = SQLITE_OK; + const char *zLocale = 0; + int nLocale = 0; + + /* xColumnLocale() must be available */ + assert( pApi->iVersion>=4 ); + + if( nVal!=1 ){ + const char *z = "wrong number of arguments to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + eType = sqlite3_value_numeric_type(apVal[0]); + if( eType!=SQLITE_INTEGER ){ + const char *z = "non-integer argument passed to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + iCol = sqlite3_value_int(apVal[0]); + if( iCol<0 || iCol>=pApi->xColumnCount(pFts) ){ + sqlite3_result_error_code(pCtx, SQLITE_RANGE); + return; + } + + rc = pApi->xColumnLocale(pFts, iCol, &zLocale, &nLocale); + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(pCtx, rc); + return; + } + + sqlite3_result_text(pCtx, zLocale, nLocale, SQLITE_TRANSIENT); +} + static int sqlite3Fts5AuxInit(fts5_api *pApi){ struct Builtin { const char *zFunc; /* Function name (nul-terminated) */ @@ -235197,9 +237422,10 @@ static int sqlite3Fts5AuxInit(fts5_api *pApi){ fts5_extension_function xFunc;/* Callback function */ void (*xDestroy)(void*); /* Destructor function */ } aBuiltin [] = { - { "snippet", 0, fts5SnippetFunction, 0 }, - { "highlight", 0, fts5HighlightFunction, 0 }, - { "bm25", 0, fts5Bm25Function, 0 }, + { "snippet", 0, fts5SnippetFunction, 0 }, + { "highlight", 0, fts5HighlightFunction, 0 }, + { "bm25", 0, fts5Bm25Function, 0 }, + { "fts5_get_locale", 0, fts5GetLocaleFunction, 0 }, }; int rc = SQLITE_OK; /* Return code */ int i; /* To iterate through builtin functions */ @@ -235864,7 +238090,6 @@ static int fts5ConfigSetEnum( ** eventually free any such error message using sqlite3_free(). */ static int fts5ConfigParseSpecial( - Fts5Global *pGlobal, Fts5Config *pConfig, /* Configuration object to update */ const char *zCmd, /* Special command to parse */ const char *zArg, /* Argument to parse */ @@ -235872,6 +238097,7 @@ static int fts5ConfigParseSpecial( ){ int rc = SQLITE_OK; int nCmd = (int)strlen(zCmd); + if( sqlite3_strnicmp("prefix", zCmd, nCmd)==0 ){ const int nByte = sizeof(int) * FTS5_MAX_PREFIX_INDEXES; const char *p; @@ -235928,12 +238154,11 @@ static int fts5ConfigParseSpecial( if( sqlite3_strnicmp("tokenize", zCmd, nCmd)==0 ){ const char *p = (const char*)zArg; sqlite3_int64 nArg = strlen(zArg) + 1; - char **azArg = sqlite3Fts5MallocZero(&rc, sizeof(char*) * nArg); - char *pDel = sqlite3Fts5MallocZero(&rc, nArg * 2); - char *pSpace = pDel; + char **azArg = sqlite3Fts5MallocZero(&rc, (sizeof(char*) + 2) * nArg); - if( azArg && pSpace ){ - if( pConfig->pTok ){ + if( azArg ){ + char *pSpace = (char*)&azArg[nArg]; + if( pConfig->t.azArg ){ *pzErr = sqlite3_mprintf("multiple tokenize=... 
directives"); rc = SQLITE_ERROR; }else{ @@ -235956,16 +238181,14 @@ static int fts5ConfigParseSpecial( *pzErr = sqlite3_mprintf("parse error in tokenize directive"); rc = SQLITE_ERROR; }else{ - rc = sqlite3Fts5GetTokenizer(pGlobal, - (const char**)azArg, (int)nArg, pConfig, - pzErr - ); + pConfig->t.azArg = (const char**)azArg; + pConfig->t.nArg = nArg; + azArg = 0; } } } - sqlite3_free(azArg); - sqlite3_free(pDel); + return rc; } @@ -235994,6 +238217,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("contentless_unindexed", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bContentlessUnindexed = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){ if( pConfig->zContentRowid ){ *pzErr = sqlite3_mprintf("multiple content_rowid=... directives"); @@ -236014,6 +238247,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("locale", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed locale=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bLocale = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("detail", zCmd, nCmd)==0 ){ const Fts5Enum aDetail[] = { { "none", FTS5_DETAIL_NONE }, @@ -236042,16 +238285,6 @@ static int fts5ConfigParseSpecial( return SQLITE_ERROR; } -/* -** Allocate an instance of the default tokenizer ("simple") at -** Fts5Config.pTokenizer. Return SQLITE_OK if successful, or an SQLite error -** code if an error occurs. -*/ -static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){ - assert( pConfig->pTok==0 && pConfig->pTokApi==0 ); - return sqlite3Fts5GetTokenizer(pGlobal, 0, 0, pConfig, 0); -} - /* ** Gobble up the first bareword or quoted word from the input buffer zIn. 
** Return a pointer to the character immediately following the last in @@ -236111,7 +238344,8 @@ static int fts5ConfigParseColumn( Fts5Config *p, char *zCol, char *zArg, - char **pzErr + char **pzErr, + int *pbUnindexed ){ int rc = SQLITE_OK; if( 0==sqlite3_stricmp(zCol, FTS5_RANK_NAME) @@ -236122,6 +238356,7 @@ static int fts5ConfigParseColumn( }else if( zArg ){ if( 0==sqlite3_stricmp(zArg, "unindexed") ){ p->abUnindexed[p->nCol] = 1; + *pbUnindexed = 1; }else{ *pzErr = sqlite3_mprintf("unrecognized column option: %s", zArg); rc = SQLITE_ERROR; @@ -236142,11 +238377,26 @@ static int fts5ConfigMakeExprlist(Fts5Config *p){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, "T.%Q", p->zContentRowid); if( p->eContent!=FTS5_CONTENT_NONE ){ + assert( p->eContent==FTS5_CONTENT_EXTERNAL + || p->eContent==FTS5_CONTENT_NORMAL + || p->eContent==FTS5_CONTENT_UNINDEXED + ); for(i=0; inCol; i++){ if( p->eContent==FTS5_CONTENT_EXTERNAL ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.%Q", p->azCol[i]); - }else{ + }else if( p->eContent==FTS5_CONTENT_NORMAL || p->abUnindexed[i] ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.c%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); + } + } + } + if( p->eContent==FTS5_CONTENT_NORMAL && p->bLocale ){ + for(i=0; inCol; i++){ + if( p->abUnindexed[i]==0 ){ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.l%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); } } } @@ -236180,10 +238430,12 @@ static int sqlite3Fts5ConfigParse( Fts5Config *pRet; /* New object to return */ int i; sqlite3_int64 nByte; + int bUnindexed = 0; /* True if there are one or more UNINDEXED */ *ppOut = pRet = (Fts5Config*)sqlite3_malloc(sizeof(Fts5Config)); if( pRet==0 ) return SQLITE_NOMEM; memset(pRet, 0, sizeof(Fts5Config)); + pRet->pGlobal = pGlobal; pRet->db = db; pRet->iCookie = -1; @@ -236232,13 +238484,13 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; }else{ if( bOption ){ - rc = fts5ConfigParseSpecial(pGlobal, pRet, + rc = fts5ConfigParseSpecial(pRet, ALWAYS(zOne)?zOne:"", zTwo?zTwo:"", pzErr ); }else{ - rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr); + rc = fts5ConfigParseColumn(pRet, zOne, zTwo, pzErr, &bUnindexed); zOne = 0; } } @@ -236270,11 +238522,17 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; } - /* If a tokenizer= option was successfully parsed, the tokenizer has - ** already been allocated. Otherwise, allocate an instance of the default - ** tokenizer (unicode61) now. */ - if( rc==SQLITE_OK && pRet->pTok==0 ){ - rc = fts5ConfigDefaultTokenizer(pGlobal, pRet); + /* We only allow contentless_unindexed=1 if the table is actually a + ** contentless one. + */ + if( rc==SQLITE_OK + && pRet->bContentlessUnindexed + && pRet->eContent!=FTS5_CONTENT_NONE + ){ + *pzErr = sqlite3_mprintf( + "contentless_unindexed=1 requires a contentless table" + ); + rc = SQLITE_ERROR; } /* If no zContent option was specified, fill in the default values. 
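  ** As a sketch of the case handled just below (names illustrative), a
  ** table declared as:
  **
  **   CREATE VIRTUAL TABLE ft USING fts5(
  **     a, b UNINDEXED, content='', contentless_unindexed=1
  **   );
  **
  ** is assigned eContent==FTS5_CONTENT_UNINDEXED, so that values for
  ** the UNINDEXED column "b" are stored in the %_content shadow table
  ** even though the table is otherwise contentless.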
*/ @@ -236285,6 +238543,9 @@ static int sqlite3Fts5ConfigParse( ); if( pRet->eContent==FTS5_CONTENT_NORMAL ){ zTail = "content"; + }else if( bUnindexed && pRet->bContentlessUnindexed ){ + pRet->eContent = FTS5_CONTENT_UNINDEXED; + zTail = "content"; }else if( pRet->bColumnsize ){ zTail = "docsize"; } @@ -236318,9 +238579,14 @@ static int sqlite3Fts5ConfigParse( static void sqlite3Fts5ConfigFree(Fts5Config *pConfig){ if( pConfig ){ int i; - if( pConfig->pTok ){ - pConfig->pTokApi->xDelete(pConfig->pTok); + if( pConfig->t.pTok ){ + if( pConfig->t.pApi1 ){ + pConfig->t.pApi1->xDelete(pConfig->t.pTok); + }else{ + pConfig->t.pApi2->xDelete(pConfig->t.pTok); + } } + sqlite3_free((char*)pConfig->t.azArg); sqlite3_free(pConfig->zDb); sqlite3_free(pConfig->zName); for(i=0; inCol; i++){ @@ -236395,10 +238661,24 @@ static int sqlite3Fts5Tokenize( void *pCtx, /* Context passed to xToken() */ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ ){ - if( pText==0 ) return SQLITE_OK; - return pConfig->pTokApi->xTokenize( - pConfig->pTok, pCtx, flags, pText, nText, xToken - ); + int rc = SQLITE_OK; + if( pText ){ + if( pConfig->t.pTok==0 ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } + if( rc==SQLITE_OK ){ + if( pConfig->t.pApi1 ){ + rc = pConfig->t.pApi1->xTokenize( + pConfig->t.pTok, pCtx, flags, pText, nText, xToken + ); + }else{ + rc = pConfig->t.pApi2->xTokenize(pConfig->t.pTok, pCtx, flags, + pText, nText, pConfig->t.pLocale, pConfig->t.nLocale, xToken + ); + } + } + } + return rc; } /* @@ -236602,6 +238882,19 @@ static int sqlite3Fts5ConfigSetValue( }else{ pConfig->bSecureDelete = (bVal ? 1 : 0); } + } + + else if( 0==sqlite3_stricmp(zKey, "insttoken") ){ + int bVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + bVal = sqlite3_value_int(pVal); + } + if( bVal<0 ){ + *pbBadkey = 1; + }else{ + pConfig->bPrefixInsttoken = (bVal ? 1 : 0); + } + }else{ *pbBadkey = 1; } @@ -236652,13 +238945,10 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ && iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE ){ rc = SQLITE_ERROR; - if( pConfig->pzErrmsg ){ - assert( 0==*pConfig->pzErrmsg ); - *pConfig->pzErrmsg = sqlite3_mprintf("invalid fts5 file format " - "(found %d, expected %d or %d) - run 'rebuild'", - iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE - ); - } + sqlite3Fts5ConfigErrmsg(pConfig, "invalid fts5 file format " + "(found %d, expected %d or %d) - run 'rebuild'", + iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE + ); }else{ pConfig->iVersion = iVersion; } @@ -236669,6 +238959,29 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ return rc; } +/* +** Set (*pConfig->pzErrmsg) to point to an sqlite3_malloc()ed buffer +** containing the error message created using printf() style formatting +** string zFmt and its trailing arguments. +*/ +static void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...){ + va_list ap; /* ... printf arguments */ + char *zMsg = 0; + + va_start(ap, zFmt); + zMsg = sqlite3_vmprintf(zFmt, ap); + if( pConfig->pzErrmsg ){ + assert( *pConfig->pzErrmsg==0 ); + *pConfig->pzErrmsg = zMsg; + }else{ + sqlite3_free(zMsg); + } + + va_end(ap); +} + + + /* ** 2014 May 31 ** @@ -236725,7 +239038,7 @@ struct Fts5Expr { /* ** eType: -** Expression node type. Always one of: +** Expression node type. 
Usually one of: ** ** FTS5_AND (nChild, apChild valid) ** FTS5_OR (nChild, apChild valid) @@ -236733,6 +239046,10 @@ struct Fts5Expr { ** FTS5_STRING (pNear valid) ** FTS5_TERM (pNear valid) ** +** An expression node with eType==0 may also exist. It always matches zero +** rows. This is created when a phrase containing no tokens is parsed. +** e.g. "". +** ** iHeight: ** Distance from this node to furthest leaf. This is always 0 for nodes ** of type FTS5_STRING and FTS5_TERM. For all other nodes it is one @@ -236953,11 +239270,12 @@ static int sqlite3Fts5ExprNew( }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF ); sqlite3Fts5ParserFree(pEngine, fts5ParseFree); + assert( sParse.pExpr || sParse.rc!=SQLITE_OK ); assert_expr_depth_ok(sParse.rc, sParse.pExpr); /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. */ - if( iColnCol && sParse.pExpr && sParse.rc==SQLITE_OK ){ + if( sParse.rc==SQLITE_OK && iColnCol ){ int n = sizeof(Fts5Colset); Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&sParse.rc, n); if( pColset ){ @@ -236974,15 +239292,7 @@ static int sqlite3Fts5ExprNew( sParse.rc = SQLITE_NOMEM; sqlite3Fts5ParseNodeFree(sParse.pExpr); }else{ - if( !sParse.pExpr ){ - const int nByte = sizeof(Fts5ExprNode); - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&sParse.rc, nByte); - if( pNew->pRoot ){ - pNew->pRoot->bEof = 1; - } - }else{ - pNew->pRoot = sParse.pExpr; - } + pNew->pRoot = sParse.pExpr; pNew->pIndex = 0; pNew->pConfig = pConfig; pNew->apExprPhrase = sParse.apPhrase; @@ -237800,7 +240110,7 @@ static int fts5ExprNodeTest_STRING( } }else{ Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter; - if( pIter->iRowid==iLast || pIter->bEof ) continue; + if( pIter->iRowid==iLast ) continue; bMatch = 0; if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){ return rc; @@ -238322,9 +240632,6 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( Fts5ExprNearset *pRet = 0; if( pParse->rc==SQLITE_OK ){ - if( pPhrase==0 ){ - return pNear; - } if( pNear==0 ){ sqlite3_int64 nByte; nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*); @@ -238546,6 +240853,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( }else if( sCtx.pPhrase->nTerm ){ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix; } + assert( pParse->apPhrase!=0 ); pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase; } @@ -238565,7 +240873,7 @@ static int sqlite3Fts5ExprClonePhrase( Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */ TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */ - if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + if( !pExpr || iPhrase<0 || iPhrase>=pExpr->nPhrase ){ rc = SQLITE_RANGE; }else{ pOrig = pExpr->apExprPhrase[iPhrase]; @@ -238933,6 +241241,9 @@ static void fts5ExprAssignXNext(Fts5ExprNode *pNode){ } } +/* +** Add pSub as a child of p. +*/ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ int ii = p->nChild; if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){ @@ -239077,19 +241388,23 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( "fts5: %s queries are not supported (detail!=full)", pNear->nPhrase==1 ? 
"phrase": "NEAR" ); - sqlite3_free(pRet); + sqlite3Fts5ParseNodeFree(pRet); pRet = 0; + pNear = 0; + assert( pLeft==0 && pRight==0 ); } } }else{ + assert( pNear==0 ); fts5ExprAddChildren(pRet, pLeft); fts5ExprAddChildren(pRet, pRight); + pLeft = pRight = 0; if( pRet->iHeight>SQLITE_FTS5_MAX_EXPR_DEPTH ){ sqlite3Fts5ParseError(pParse, "fts5 expression tree is too large (maximum depth %d)", SQLITE_FTS5_MAX_EXPR_DEPTH ); - sqlite3_free(pRet); + sqlite3Fts5ParseNodeFree(pRet); pRet = 0; } } @@ -239141,6 +241456,8 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( ); if( pRight->eType==FTS5_EOF ){ + assert( pParse->apPhrase!=0 ); + assert( pParse->nPhrase>0 ); assert( pParse->apPhrase[pParse->nPhrase-1]==pRight->pNear->apPhrase[0] ); sqlite3Fts5ParseNodeFree(pRight); pRet = pLeft; @@ -239713,7 +242030,7 @@ static int fts5ExprPopulatePoslistsCb( int rc = sqlite3Fts5PoslistWriterAppend( &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff ); - if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){ + if( rc==SQLITE_OK && (pExpr->pConfig->bTokendata || pT->bPrefix) ){ int iCol = p->iOff>>32; int iTokOff = p->iOff & 0x7FFFFFFF; rc = sqlite3Fts5IndexIterWriteTokendata( @@ -239773,6 +242090,7 @@ static int fts5ExprCheckPoslists(Fts5ExprNode *pNode, i64 iRowid){ pNode->iRowid = iRowid; pNode->bEof = 0; switch( pNode->eType ){ + case 0: case FTS5_TERM: case FTS5_STRING: return (pNode->pNear->apPhrase[0]->poslist.n>0); @@ -239905,15 +242223,14 @@ static int sqlite3Fts5ExprInstToken( return SQLITE_RANGE; } pTerm = &pPhrase->aTerm[iToken]; - if( pTerm->bPrefix==0 ){ - if( pExpr->pConfig->bTokendata ){ - rc = sqlite3Fts5IterToken( - pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut - ); - }else{ - *ppOut = pTerm->pTerm; - *pnOut = pTerm->nFullTerm; - } + if( pExpr->pConfig->bTokendata || pTerm->bPrefix ){ + rc = sqlite3Fts5IterToken( + pTerm->pIter, pTerm->pTerm, pTerm->nQueryTerm, + iRowid, iCol, iOff+iToken, ppOut, pnOut + ); + }else{ + *ppOut = pTerm->pTerm; + *pnOut = pTerm->nFullTerm; } return rc; } @@ -241303,11 +243620,13 @@ static int fts5LeafFirstTermOff(Fts5Data *pLeaf){ /* ** Close the read-only blob handle, if it is open. 
*/ -static void sqlite3Fts5IndexCloseReader(Fts5Index *p){ +static void fts5IndexCloseReader(Fts5Index *p){ if( p->pReader ){ + int rc; sqlite3_blob *pReader = p->pReader; p->pReader = 0; - sqlite3_blob_close(pReader); + rc = sqlite3_blob_close(pReader); + if( p->rc==SQLITE_OK ) p->rc = rc; } } @@ -241332,7 +243651,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ assert( p->pReader==0 ); p->pReader = pBlob; if( rc!=SQLITE_OK ){ - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); } if( rc==SQLITE_ABORT ) rc = SQLITE_OK; } @@ -241356,11 +243675,12 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ if( rc==SQLITE_OK ){ u8 *aOut = 0; /* Read blob data into this buffer */ int nByte = sqlite3_blob_bytes(p->pReader); - sqlite3_int64 nAlloc = sizeof(Fts5Data) + nByte + FTS5_DATA_PADDING; + int szData = (sizeof(Fts5Data) + 7) & ~7; + sqlite3_int64 nAlloc = szData + nByte + FTS5_DATA_PADDING; pRet = (Fts5Data*)sqlite3_malloc64(nAlloc); if( pRet ){ pRet->nn = nByte; - aOut = pRet->p = (u8*)&pRet[1]; + aOut = pRet->p = (u8*)pRet + szData; }else{ rc = SQLITE_NOMEM; } @@ -241383,6 +243703,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ } assert( (pRet==0)==(p->rc!=SQLITE_OK) ); + assert( pRet==0 || EIGHT_BYTE_ALIGNMENT( pRet->p ) ); return pRet; } @@ -241414,9 +243735,13 @@ static int fts5IndexPrepareStmt( ){ if( p->rc==SQLITE_OK ){ if( zSql ){ - p->rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, + int rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_NO_VTAB, ppStmt, 0); + /* If this prepare() call fails with SQLITE_ERROR, then one of the + ** %_idx or %_data tables has been removed or modified. Call this + ** corruption. */ + p->rc = (rc==SQLITE_ERROR ? SQLITE_CORRUPT : rc); }else{ p->rc = SQLITE_NOMEM; } @@ -242708,7 +245033,7 @@ static void fts5SegIterNext_None( if( iOffiEndofDoclist ){ /* Next entry is on the current page */ - i64 iDelta; + u64 iDelta; iOff += sqlite3Fts5GetVarint(&pIter->pLeaf->p[iOff], (u64*)&iDelta); pIter->iLeafOffset = iOff; pIter->iRowid += iDelta; @@ -245412,6 +247737,11 @@ static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){ nBest = nPercent; } } + + /* If pLvl is already the input level to an ongoing merge, look no + ** further for a merge candidate. The caller should be allowed to + ** continue merging from pLvl first. */ + if( pLvl->nMerge ) break; } } return iRet; @@ -245523,6 +247853,14 @@ static int fts5IndexReturn(Fts5Index *p){ return rc; } +/* +** Close the read-only blob handle, if it is open. +*/ +static void sqlite3Fts5IndexCloseReader(Fts5Index *p){ + fts5IndexCloseReader(p); + fts5IndexReturn(p); +} + typedef struct Fts5FlushCtx Fts5FlushCtx; struct Fts5FlushCtx { Fts5Index *pIdx; @@ -245980,8 +248318,11 @@ static void fts5DoSecureDelete( ** This is called as part of flushing a delete to disk in 'secure-delete' ** mode. It edits the segments within the database described by argument ** pStruct to remove the entries for term zTerm, rowid iRowid. +** +** Return SQLITE_OK if successful, or an SQLite error code if an error +** has occurred. Any error code is also stored in the Fts5Index handle. */ -static void fts5FlushSecureDelete( +static int fts5FlushSecureDelete( Fts5Index *p, Fts5Structure *pStruct, const char *zTerm, @@ -245991,6 +248332,24 @@ static void fts5FlushSecureDelete( const int f = FTS5INDEX_QUERY_SKIPHASH; Fts5Iter *pIter = 0; /* Used to find term instance */ + /* If the version number has not been set to SECUREDELETE, do so now. 
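+  ** Secure-delete itself is enabled with the usual configuration
+  ** idiom, e.g. (the table name "ft" is illustrative only):
+  **
+  **   INSERT INTO ft(ft, rank) VALUES('secure-delete', 1);
+  **
+  ** Writing version FTS5_CURRENT_VERSION_SECUREDELETE to the %_config
+  ** table ensures that FTS5 versions too old to understand
+  ** secure-delete report "invalid fts5 file format" instead of
+  ** misreading the index.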
*/ + if( p->pConfig->iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE ){ + Fts5Config *pConfig = p->pConfig; + sqlite3_stmt *pStmt = 0; + fts5IndexPrepareStmt(p, &pStmt, sqlite3_mprintf( + "REPLACE INTO %Q.'%q_config' VALUES ('version', %d)", + pConfig->zDb, pConfig->zName, FTS5_CURRENT_VERSION_SECUREDELETE + )); + if( p->rc==SQLITE_OK ){ + int rc; + sqlite3_step(pStmt); + rc = sqlite3_finalize(pStmt); + if( p->rc==SQLITE_OK ) p->rc = rc; + pConfig->iCookie++; + pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; + } + } + fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter); if( fts5MultiIterEof(p, pIter)==0 ){ i64 iThis = fts5MultiIterRowid(pIter); @@ -246008,6 +248367,7 @@ static void fts5FlushSecureDelete( } fts5MultiIterFree(pIter); + return p->rc; } @@ -246091,8 +248451,9 @@ static void fts5FlushOneHash(Fts5Index *p){ ** using fts5FlushSecureDelete(). */ if( bSecureDelete ){ if( eDetail==FTS5_DETAIL_NONE ){ - if( iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ iOff++; continue; @@ -246721,6 +249083,383 @@ static void fts5MergePrefixLists( *p1 = out; } + +/* +** Iterate through a range of entries in the FTS index, invoking the xVisit +** callback for each of them. +** +** Parameter pToken points to an nToken buffer containing an FTS index term +** (i.e. a document term with the preceding 1 byte index identifier - +** FTS5_MAIN_PREFIX or similar). If bPrefix is true, then the call visits +** all entries for terms that have pToken/nToken as a prefix. If bPrefix +** is false, then only entries with pToken/nToken as the entire key are +** visited. +** +** If the current table is a tokendata=1 table, then if bPrefix is true then +** each index term is treated separately. However, if bPrefix is false, then +** all index terms corresponding to pToken/nToken are collapsed into a single +** term before the callback is invoked. +** +** The callback invoked for each entry visited is specified by paramter xVisit. +** Each time it is invoked, it is passed a pointer to the Fts5Index object, +** a copy of the 7th paramter to this function (pCtx) and a pointer to the +** iterator that indicates the current entry. If the current entry is the +** first with a new term (i.e. different from that of the previous entry, +** including the very first term), then the final two parameters are passed +** a pointer to the term and its size in bytes, respectively. If the current +** entry is not the first associated with its term, these two parameters +** are passed 0. +** +** If parameter pColset is not NULL, then it is used to filter entries before +** the callback is invoked. +*/ +static int fts5VisitEntries( + Fts5Index *p, /* Fts5 index object */ + Fts5Colset *pColset, /* Columns filter to apply, or NULL */ + u8 *pToken, /* Buffer containing token */ + int nToken, /* Size of buffer pToken in bytes */ + int bPrefix, /* True for a prefix scan */ + void (*xVisit)(Fts5Index*, void *pCtx, Fts5Iter *pIter, const u8*, int), + void *pCtx /* Passed as second argument to xVisit() */ +){ + const int flags = (bPrefix ? 
FTS5INDEX_QUERY_SCAN : 0) + | FTS5INDEX_QUERY_SKIPEMPTY + | FTS5INDEX_QUERY_NOOUTPUT; + Fts5Iter *p1 = 0; /* Iterator used to gather data from index */ + int bNewTerm = 1; + Fts5Structure *pStruct = fts5StructureRead(p); + + fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); + fts5IterSetOutputCb(&p->rc, p1); + for( /* no-op */ ; + fts5MultiIterEof(p, p1)==0; + fts5MultiIterNext2(p, p1, &bNewTerm) + ){ + Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; + int nNew = 0; + const u8 *pNew = 0; + + p1->xSetOutputs(p1, pSeg); + if( p->rc ) break; + + if( bNewTerm ){ + nNew = pSeg->term.n; + pNew = pSeg->term.p; + if( nNewrc; +} + + +/* +** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an +** array of these for each row it visits (so all iRowid fields are the same). +** Or, for an iterator used by an "ORDER BY rank" query, it accumulates an +** array of these for the entire query (in which case iRowid fields may take +** a variety of values). +** +** Each instance in the array indicates the iterator (and therefore term) +** associated with position iPos of rowid iRowid. This is used by the +** xInstToken() API. +** +** iRowid: +** Rowid for the current entry. +** +** iPos: +** Position of current entry within row. In the usual ((iCol<<32)+iOff) +** format (e.g. see macros FTS5_POS2COLUMN() and FTS5_POS2OFFSET()). +** +** iIter: +** If the Fts5TokenDataIter iterator that the entry is part of is +** actually an iterator (i.e. with nIter>0, not just a container for +** Fts5TokenDataMap structures), then this variable is an index into +** the apIter[] array. The corresponding term is that which the iterator +** at apIter[iIter] currently points to. +** +** Or, if the Fts5TokenDataIter iterator is just a container object +** (nIter==0), then iIter is an index into the term.p[] buffer where +** the term is stored. +** +** nByte: +** In the case where iIter is an index into term.p[], this variable +** is the size of the term in bytes. If iIter is an index into apIter[], +** this variable is unused. +*/ +struct Fts5TokenDataMap { + i64 iRowid; /* Row this token is located in */ + i64 iPos; /* Position of token */ + int iIter; /* Iterator token was read from */ + int nByte; /* Length of token in bytes (or 0) */ +}; + +/* +** An object used to supplement Fts5Iter for tokendata=1 iterators. +** +** This object serves two purposes. The first is as a container for an array +** of Fts5TokenDataMap structures, which are used to find the token required +** when the xInstToken() API is used. This is done by the nMapAlloc, nMap and +** aMap[] variables. +*/ +struct Fts5TokenDataIter { + int nMapAlloc; /* Allocated size of aMap[] in entries */ + int nMap; /* Number of valid entries in aMap[] */ + Fts5TokenDataMap *aMap; /* Array of (rowid+pos -> token) mappings */ + + /* The following are used for prefix-queries only. */ + Fts5Buffer terms; + + /* The following are used for other full-token tokendata queries only. */ + int nIter; + int nIterAlloc; + Fts5PoslistReader *aPoslistReader; + int *aPoslistToIter; + Fts5Iter *apIter[1]; +}; + +/* +** The two input arrays - a1[] and a2[] - are in sorted order. This function +** merges the two arrays together and writes the result to output array +** aOut[]. aOut[] is guaranteed to be large enough to hold the result. +** +** Duplicate entries are copied into the output. So the size of the output +** array is always (n1+n2) entries. 
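+** For example, merging a1 = {(r=1,p=2), (r=3,p=1)} with
+** a2 = {(r=1,p=2), (r=2,p=7)} yields
+** {(r=1,p=2), (r=1,p=2), (r=2,p=7), (r=3,p=1)} - four entries, with
+** the duplicate (r=1,p=2) retained. Entries are ordered by
+** (iRowid, iPos).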
+*/ +static void fts5TokendataMerge( + Fts5TokenDataMap *a1, int n1, /* Input array 1 */ + Fts5TokenDataMap *a2, int n2, /* Input array 2 */ + Fts5TokenDataMap *aOut /* Output array */ +){ + int i1 = 0; + int i2 = 0; + + assert( n1>=0 && n2>=0 ); + while( i1=n2 || (i1rc==SQLITE_OK ){ + if( pT->nMap==pT->nMapAlloc ){ + int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; + int nAlloc = nNew * sizeof(Fts5TokenDataMap); + Fts5TokenDataMap *aNew; + + aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nAlloc); + if( aNew==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pT->aMap[pT->nMap].iRowid = iRowid; + pT->aMap[pT->nMap].iPos = iPos; + pT->aMap[pT->nMap].iIter = iIter; + pT->aMap[pT->nMap].nByte = nByte; + pT->nMap++; + } +} + +/* +** Sort the contents of the pT->aMap[] array. +** +** The sorting algorithm requries a malloc(). If this fails, an error code +** is left in Fts5Index.rc before returning. +*/ +static void fts5TokendataIterSortMap(Fts5Index *p, Fts5TokenDataIter *pT){ + Fts5TokenDataMap *aTmp = 0; + int nByte = pT->nMap * sizeof(Fts5TokenDataMap); + + aTmp = (Fts5TokenDataMap*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( aTmp ){ + Fts5TokenDataMap *a1 = pT->aMap; + Fts5TokenDataMap *a2 = aTmp; + i64 nHalf; + + for(nHalf=1; nHalfnMap; nHalf=nHalf*2){ + int i1; + for(i1=0; i1nMap; i1+=(nHalf*2)){ + int n1 = MIN(nHalf, pT->nMap-i1); + int n2 = MIN(nHalf, pT->nMap-i1-n1); + fts5TokendataMerge(&a1[i1], n1, &a1[i1+n1], n2, &a2[i1]); + } + SWAPVAL(Fts5TokenDataMap*, a1, a2); + } + + if( a1!=pT->aMap ){ + memcpy(pT->aMap, a1, pT->nMap*sizeof(Fts5TokenDataMap)); + } + sqlite3_free(aTmp); + +#ifdef SQLITE_DEBUG + { + int ii; + for(ii=1; iinMap; ii++){ + Fts5TokenDataMap *p1 = &pT->aMap[ii-1]; + Fts5TokenDataMap *p2 = &pT->aMap[ii]; + assert( p1->iRowidiRowid + || (p1->iRowid==p2->iRowid && p1->iPos<=p2->iPos) + ); + } + } +#endif + } +} + +/* +** Delete an Fts5TokenDataIter structure and its contents. +*/ +static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ + if( pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + fts5MultiIterFree(pSet->apIter[ii]); + } + fts5BufferFree(&pSet->terms); + sqlite3_free(pSet->aPoslistReader); + sqlite3_free(pSet->aMap); + sqlite3_free(pSet); + } +} + + +/* +** fts5VisitEntries() context object used by fts5SetupPrefixIterTokendata() +** to pass data to prefixIterSetupTokendataCb(). +*/ +typedef struct TokendataSetupCtx TokendataSetupCtx; +struct TokendataSetupCtx { + Fts5TokenDataIter *pT; /* Object being populated with mappings */ + int iTermOff; /* Offset of current term in terms.p[] */ + int nTermByte; /* Size of current term in bytes */ +}; + +/* +** fts5VisitEntries() callback used by fts5SetupPrefixIterTokendata(). This +** callback adds an entry to the Fts5TokenDataIter.aMap[] array for each +** position in the current position-list. It doesn't matter that some of +** these may be out of order - they will be sorted later. 
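+** (The later sort is fts5TokendataIterSortMap() - a bottom-up
+** mergesort over aMap[] that merges runs of width 1, 2, 4, ... entries
+** using fts5TokendataMerge() and a single temporary array. It runs in
+** O(n log n) time, and its one allocation may fail, leaving
+** SQLITE_NOMEM in Fts5Index.rc.)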
+*/ +static void prefixIterSetupTokendataCb( + Fts5Index *p, + void *pCtx, + Fts5Iter *p1, + const u8 *pNew, + int nNew +){ + TokendataSetupCtx *pSetup = (TokendataSetupCtx*)pCtx; + int iPosOff = 0; + i64 iPos = 0; + + if( pNew ){ + pSetup->nTermByte = nNew-1; + pSetup->iTermOff = pSetup->pT->terms.n; + fts5BufferAppendBlob(&p->rc, &pSetup->pT->terms, nNew-1, pNew+1); + } + + while( 0==sqlite3Fts5PoslistNext64( + p1->base.pData, p1->base.nData, &iPosOff, &iPos + ) ){ + fts5TokendataIterAppendMap(p, + pSetup->pT, pSetup->iTermOff, pSetup->nTermByte, p1->base.iRowid, iPos + ); + } +} + + +/* +** Context object passed by fts5SetupPrefixIter() to fts5VisitEntries(). +*/ +typedef struct PrefixSetupCtx PrefixSetupCtx; +struct PrefixSetupCtx { + void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); + void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*); + i64 iLastRowid; + int nMerge; + Fts5Buffer *aBuf; + int nBuf; + Fts5Buffer doclist; + TokendataSetupCtx *pTokendata; +}; + +/* +** fts5VisitEntries() callback used by fts5SetupPrefixIter() +*/ +static void prefixIterSetupCb( + Fts5Index *p, + void *pCtx, + Fts5Iter *p1, + const u8 *pNew, + int nNew +){ + PrefixSetupCtx *pSetup = (PrefixSetupCtx*)pCtx; + const int nMerge = pSetup->nMerge; + + if( p1->base.nData>0 ){ + if( p1->base.iRowid<=pSetup->iLastRowid && pSetup->doclist.n>0 ){ + int i; + for(i=0; p->rc==SQLITE_OK && pSetup->doclist.n; i++){ + int i1 = i*nMerge; + int iStore; + assert( i1+nMerge<=pSetup->nBuf ); + for(iStore=i1; iStoreaBuf[iStore].n==0 ){ + fts5BufferSwap(&pSetup->doclist, &pSetup->aBuf[iStore]); + fts5BufferZero(&pSetup->doclist); + break; + } + } + if( iStore==i1+nMerge ){ + pSetup->xMerge(p, &pSetup->doclist, nMerge, &pSetup->aBuf[i1]); + for(iStore=i1; iStoreaBuf[iStore]); + } + } + } + pSetup->iLastRowid = 0; + } + + pSetup->xAppend( + p, (u64)p1->base.iRowid-(u64)pSetup->iLastRowid, p1, &pSetup->doclist + ); + pSetup->iLastRowid = p1->base.iRowid; + } + + if( pSetup->pTokendata ){ + prefixIterSetupTokendataCb(p, (void*)pSetup->pTokendata, p1, pNew, nNew); + } +} + static void fts5SetupPrefixIter( Fts5Index *p, /* Index to read from */ int bDesc, /* True for "ORDER BY rowid DESC" */ @@ -246731,38 +249470,41 @@ static void fts5SetupPrefixIter( Fts5Iter **ppIter /* OUT: New iterator */ ){ Fts5Structure *pStruct; - Fts5Buffer *aBuf; - int nBuf = 32; - int nMerge = 1; + PrefixSetupCtx s; + TokendataSetupCtx s2; + + memset(&s, 0, sizeof(s)); + memset(&s2, 0, sizeof(s2)); + + s.nMerge = 1; + s.iLastRowid = 0; + s.nBuf = 32; + if( iIdx==0 + && p->pConfig->eDetail==FTS5_DETAIL_FULL + && p->pConfig->bPrefixInsttoken + ){ + s.pTokendata = &s2; + s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, sizeof(*s2.pT)); + } - void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); - void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*); if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ - xMerge = fts5MergeRowidLists; - xAppend = fts5AppendRowid; + s.xMerge = fts5MergeRowidLists; + s.xAppend = fts5AppendRowid; }else{ - nMerge = FTS5_MERGE_NLIST-1; - nBuf = nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ - xMerge = fts5MergePrefixLists; - xAppend = fts5AppendPoslist; + s.nMerge = FTS5_MERGE_NLIST-1; + s.nBuf = s.nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ + s.xMerge = fts5MergePrefixLists; + s.xAppend = fts5AppendPoslist; } - aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf); + s.aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*s.nBuf); pStruct = fts5StructureRead(p); - assert( p->rc!=SQLITE_OK 
|| (aBuf && pStruct) ); + assert( p->rc!=SQLITE_OK || (s.aBuf && pStruct) ); if( p->rc==SQLITE_OK ){ - const int flags = FTS5INDEX_QUERY_SCAN - | FTS5INDEX_QUERY_SKIPEMPTY - | FTS5INDEX_QUERY_NOOUTPUT; + void *pCtx = (void*)&s; int i; - i64 iLastRowid = 0; - Fts5Iter *p1 = 0; /* Iterator used to gather data from index */ Fts5Data *pData; - Fts5Buffer doclist; - int bNewTerm = 1; - - memset(&doclist, 0, sizeof(doclist)); /* If iIdx is non-zero, then it is the number of a prefix-index for ** prefixes 1 character longer than the prefix being queried for. That @@ -246770,94 +249512,45 @@ static void fts5SetupPrefixIter( ** corresponding to the prefix itself. That one is extracted from the ** main term index here. */ if( iIdx!=0 ){ - int dummy = 0; - const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; pToken[0] = FTS5_MAIN_PREFIX; - fts5MultiIterNew(p, pStruct, f2, pColset, pToken, nToken, -1, 0, &p1); - fts5IterSetOutputCb(&p->rc, p1); - for(; - fts5MultiIterEof(p, p1)==0; - fts5MultiIterNext2(p, p1, &dummy) - ){ - Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; - p1->xSetOutputs(p1, pSeg); - if( p1->base.nData ){ - xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist); - iLastRowid = p1->base.iRowid; - } - } - fts5MultiIterFree(p1); + fts5VisitEntries(p, pColset, pToken, nToken, 0, prefixIterSetupCb, pCtx); } pToken[0] = FTS5_MAIN_PREFIX + iIdx; - fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); - fts5IterSetOutputCb(&p->rc, p1); + fts5VisitEntries(p, pColset, pToken, nToken, 1, prefixIterSetupCb, pCtx); - for( /* no-op */ ; - fts5MultiIterEof(p, p1)==0; - fts5MultiIterNext2(p, p1, &bNewTerm) - ){ - Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; - int nTerm = pSeg->term.n; - const u8 *pTerm = pSeg->term.p; - p1->xSetOutputs(p1, pSeg); - - assert_nc( memcmp(pToken, pTerm, MIN(nToken, nTerm))<=0 ); - if( bNewTerm ){ - if( nTermbase.nData==0 ) continue; - if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ - for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ - int i1 = i*nMerge; - int iStore; - assert( i1+nMerge<=nBuf ); - for(iStore=i1; iStorebase.iRowid-(u64)iLastRowid, p1, &doclist); - iLastRowid = p1->base.iRowid; - } - - assert( (nBuf%nMerge)==0 ); - for(i=0; irc==SQLITE_OK ){ - xMerge(p, &doclist, nMerge, &aBuf[i]); + s.xMerge(p, &s.doclist, s.nMerge, &s.aBuf[i]); } - for(iFree=i; iFreerc!=SQLITE_OK ); if( pData ){ pData->p = (u8*)&pData[1]; - pData->nn = pData->szLeaf = doclist.n; - if( doclist.n ) memcpy(pData->p, doclist.p, doclist.n); + pData->nn = pData->szLeaf = s.doclist.n; + if( s.doclist.n ) memcpy(pData->p, s.doclist.p, s.doclist.n); fts5MultiIterNew2(p, pData, bDesc, ppIter); } - fts5BufferFree(&doclist); + + assert( (*ppIter)!=0 || p->rc!=SQLITE_OK ); + if( p->rc==SQLITE_OK && s.pTokendata ){ + fts5TokendataIterSortMap(p, s2.pT); + (*ppIter)->pTokenDataIter = s2.pT; + s2.pT = 0; + } } + fts5TokendataIterDelete(s2.pT); + fts5BufferFree(&s.doclist); fts5StructureRelease(pStruct); - sqlite3_free(aBuf); + sqlite3_free(s.aBuf); } @@ -246895,7 +249588,7 @@ static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){ static int sqlite3Fts5IndexSync(Fts5Index *p){ assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); return fts5IndexReturn(p); } @@ -246906,11 +249599,10 @@ static int sqlite3Fts5IndexSync(Fts5Index *p){ ** records must be invalidated. 
*/ static int sqlite3Fts5IndexRollback(Fts5Index *p){ - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); fts5IndexDiscardData(p); fts5StructureInvalidate(p); - /* assert( p->rc==SQLITE_OK ); */ - return SQLITE_OK; + return fts5IndexReturn(p); } /* @@ -247111,37 +249803,15 @@ static void fts5SegIterSetEOF(Fts5SegIter *pSeg){ pSeg->pLeaf = 0; } -/* -** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an -** array of these for each row it visits. Or, for an iterator used by an -** "ORDER BY rank" query, it accumulates an array of these for the entire -** query. -** -** Each instance in the array indicates the iterator (and therefore term) -** associated with position iPos of rowid iRowid. This is used by the -** xInstToken() API. -*/ -struct Fts5TokenDataMap { - i64 iRowid; /* Row this token is located in */ - i64 iPos; /* Position of token */ - int iIter; /* Iterator token was read from */ -}; - -/* -** An object used to supplement Fts5Iter for tokendata=1 iterators. -*/ -struct Fts5TokenDataIter { - int nIter; - int nIterAlloc; - - int nMap; - int nMapAlloc; - Fts5TokenDataMap *aMap; - - Fts5PoslistReader *aPoslistReader; - int *aPoslistToIter; - Fts5Iter *apIter[1]; -}; +static void fts5IterClose(Fts5IndexIter *pIndexIter){ + if( pIndexIter ){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5Index *pIndex = pIter->pIndex; + fts5TokendataIterDelete(pIter->pTokenDataIter); + fts5MultiIterFree(pIter); + fts5IndexCloseReader(pIndex); + } +} /* ** This function appends iterator pAppend to Fts5TokenDataIter pIn and @@ -247170,7 +249840,7 @@ static Fts5TokenDataIter *fts5AppendTokendataIter( } } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pAppend); + fts5IterClose((Fts5IndexIter*)pAppend); }else{ pRet->apIter[pRet->nIter++] = pAppend; } @@ -247179,54 +249849,6 @@ static Fts5TokenDataIter *fts5AppendTokendataIter( return pRet; } -/* -** Delete an Fts5TokenDataIter structure and its contents. -*/ -static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ - if( pSet ){ - int ii; - for(ii=0; iinIter; ii++){ - fts5MultiIterFree(pSet->apIter[ii]); - } - sqlite3_free(pSet->aPoslistReader); - sqlite3_free(pSet->aMap); - sqlite3_free(pSet); - } -} - -/* -** Append a mapping to the token-map belonging to object pT. -*/ -static void fts5TokendataIterAppendMap( - Fts5Index *p, - Fts5TokenDataIter *pT, - int iIter, - i64 iRowid, - i64 iPos -){ - if( p->rc==SQLITE_OK ){ - if( pT->nMap==pT->nMapAlloc ){ - int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; - int nByte = nNew * sizeof(Fts5TokenDataMap); - Fts5TokenDataMap *aNew; - - aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte); - if( aNew==0 ){ - p->rc = SQLITE_NOMEM; - return; - } - - pT->aMap = aNew; - pT->nMapAlloc = nNew; - } - - pT->aMap[pT->nMap].iRowid = iRowid; - pT->aMap[pT->nMap].iPos = iPos; - pT->aMap[pT->nMap].iIter = iIter; - pT->nMap++; - } -} - /* ** The iterator passed as the only argument must be a tokendata=1 iterator ** (pIter->pTokenDataIter!=0). 
This function sets the iterator output @@ -247267,7 +249889,7 @@ static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){ pIter->base.iRowid = iRowid; if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){ - fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1); + fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, 0, iRowid, -1); }else if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){ int nReader = 0; @@ -247431,7 +250053,7 @@ static Fts5Iter *fts5SetupTokendataIter( fts5BufferSet(&p->rc, &bSeek, nToken, pToken); } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + fts5IterClose((Fts5IndexIter*)pNew); break; } @@ -247496,7 +250118,7 @@ static Fts5Iter *fts5SetupTokendataIter( ** not point to any terms that match the query. So delete it and break ** out of the loop - all required iterators have been collected. */ if( pSmall==0 ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + fts5IterClose((Fts5IndexIter*)pNew); break; } @@ -247520,6 +250142,7 @@ static Fts5Iter *fts5SetupTokendataIter( pRet = fts5MultiIterAlloc(p, 0); } if( pRet ){ + pRet->nSeg = 0; pRet->pTokenDataIter = pSet; if( pSet ){ fts5IterSetOutputsTokendata(pRet); @@ -247535,7 +250158,6 @@ static Fts5Iter *fts5SetupTokendataIter( return pRet; } - /* ** Open a new iterator to iterate though all rowid that match the ** specified token or token prefix. @@ -247558,8 +250180,14 @@ static int sqlite3Fts5IndexQuery( int iIdx = 0; /* Index to search */ int iPrefixIdx = 0; /* +1 prefix index */ int bTokendata = pConfig->bTokendata; + assert( buf.p!=0 ); if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); + /* The NOTOKENDATA flag is set when each token in a tokendata=1 table + ** should be treated individually, instead of merging all those with + ** a common prefix into a single entry. This is used, for example, by + ** queries performed as part of an integrity-check, or by the fts5vocab + ** module. */ if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){ bTokendata = 0; } @@ -247590,7 +250218,7 @@ static int sqlite3Fts5IndexQuery( } if( bTokendata && iIdx==0 ){ - buf.p[0] = '0'; + buf.p[0] = FTS5_MAIN_PREFIX; pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset); }else if( iIdx<=pConfig->nPrefix ){ /* Straight index lookup */ @@ -247603,7 +250231,7 @@ static int sqlite3Fts5IndexQuery( fts5StructureRelease(pStruct); } }else{ - /* Scan multiple terms in the main index */ + /* Scan multiple terms in the main index for a prefix query. 
*/ int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); if( pRet==0 ){ @@ -247619,9 +250247,9 @@ static int sqlite3Fts5IndexQuery( } if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pRet); + fts5IterClose((Fts5IndexIter*)pRet); pRet = 0; - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); } *ppIter = (Fts5IndexIter*)pRet; @@ -247639,7 +250267,8 @@ static int sqlite3Fts5IndexQuery( static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; assert( pIter->pIndex->rc==SQLITE_OK ); - if( pIter->pTokenDataIter ){ + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); fts5TokendataIterNext(pIter, 0, 0); }else{ fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); @@ -247676,7 +250305,8 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){ */ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - if( pIter->pTokenDataIter ){ + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); fts5TokendataIterNext(pIter, 1, iMatch); }else{ fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); @@ -247695,14 +250325,61 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ return (z ? &z[1] : 0); } +/* +** pIter is a prefix query. This function populates pIter->pTokenDataIter +** with an Fts5TokenDataIter object containing mappings for all rows +** matched by the query. +*/ +static int fts5SetupPrefixIterTokendata( + Fts5Iter *pIter, + const char *pToken, /* Token prefix to search for */ + int nToken /* Size of pToken in bytes */ +){ + Fts5Index *p = pIter->pIndex; + Fts5Buffer token = {0, 0, 0}; + TokendataSetupCtx ctx; + + memset(&ctx, 0, sizeof(ctx)); + + fts5BufferGrow(&p->rc, &token, nToken+1); + assert( token.p!=0 || p->rc!=SQLITE_OK ); + ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*ctx.pT)); + + if( p->rc==SQLITE_OK ){ + + /* Fill in the token prefix to search for */ + token.p[0] = FTS5_MAIN_PREFIX; + memcpy(&token.p[1], pToken, nToken); + token.n = nToken+1; + + fts5VisitEntries( + p, 0, token.p, token.n, 1, prefixIterSetupTokendataCb, (void*)&ctx + ); + + fts5TokendataIterSortMap(p, ctx.pT); + } + + if( p->rc==SQLITE_OK ){ + pIter->pTokenDataIter = ctx.pT; + }else{ + fts5TokendataIterDelete(ctx.pT); + } + fts5BufferFree(&token); + + return fts5IndexReturn(p); +} + /* ** This is used by xInstToken() to access the token at offset iOff, column ** iCol of row iRowid. The token is returned via output variables *ppOut ** and *pnOut. The iterator passed as the first argument must be a tokendata=1 ** iterator (pIter->pTokenDataIter!=0). 
+** +** pToken/nToken: */ static int sqlite3Fts5IterToken( Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, i64 iRowid, int iCol, int iOff, @@ -247710,13 +250387,22 @@ static int sqlite3Fts5IterToken( ){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; Fts5TokenDataIter *pT = pIter->pTokenDataIter; - Fts5TokenDataMap *aMap = pT->aMap; i64 iPos = (((i64)iCol)<<32) + iOff; - + Fts5TokenDataMap *aMap = 0; int i1 = 0; - int i2 = pT->nMap; + int i2 = 0; int iTest = 0; + assert( pT || (pToken && pIter->nSeg>0) ); + if( pT==0 ){ + int rc = fts5SetupPrefixIterTokendata(pIter, pToken, nToken); + if( rc!=SQLITE_OK ) return rc; + pT = pIter->pTokenDataIter; + } + + i2 = pT->nMap; + aMap = pT->aMap; + while( i2>i1 ){ iTest = (i1 + i2) / 2; @@ -247739,9 +250425,15 @@ static int sqlite3Fts5IterToken( } if( i2>i1 ){ - Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; - *ppOut = (const char*)pMap->aSeg[0].term.p+1; - *pnOut = pMap->aSeg[0].term.n-1; + if( pIter->nSeg==0 ){ + Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; + *ppOut = (const char*)pMap->aSeg[0].term.p+1; + *pnOut = pMap->aSeg[0].term.n-1; + }else{ + Fts5TokenDataMap *p = &aMap[iTest]; + *ppOut = (const char*)&pT->terms.p[p->iIter]; + *pnOut = aMap[iTest].nByte; + } } return SQLITE_OK; @@ -247753,7 +250445,9 @@ static int sqlite3Fts5IterToken( */ static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - if( pIter && pIter->pTokenDataIter ){ + if( pIter && pIter->pTokenDataIter + && (pIter->nSeg==0 || pIter->pIndex->pConfig->eDetail!=FTS5_DETAIL_FULL) + ){ pIter->pTokenDataIter->nMap = 0; } } @@ -247773,17 +250467,29 @@ static int sqlite3Fts5IndexIterWriteTokendata( Fts5Iter *pIter = (Fts5Iter*)pIndexIter; Fts5TokenDataIter *pT = pIter->pTokenDataIter; Fts5Index *p = pIter->pIndex; - int ii; + i64 iPos = (((i64)iCol)<<32) + iOff; assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL ); - assert( pIter->pTokenDataIter ); - - for(ii=0; iinIter; ii++){ - Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; - if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; - } - if( iinIter ){ - fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff); + assert( pIter->pTokenDataIter || pIter->nSeg>0 ); + if( pIter->nSeg>0 ){ + /* This is a prefix term iterator. 
*/ + if( pT==0 ){ + pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*pT)); + pIter->pTokenDataIter = pT; + } + if( pT ){ + fts5TokendataIterAppendMap(p, pT, pT->terms.n, nToken, iRowid, iPos); + fts5BufferAppendBlob(&p->rc, &pT->terms, nToken, (const u8*)pToken); + } + }else{ + int ii; + for(ii=0; iinIter; ii++){ + Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; + if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; + } + if( iinIter ){ + fts5TokendataIterAppendMap(p, pT, ii, 0, iRowid, iPos); + } } return fts5IndexReturn(p); } @@ -247793,11 +250499,9 @@ static int sqlite3Fts5IndexIterWriteTokendata( */ static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){ if( pIndexIter ){ - Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - Fts5Index *pIndex = pIter->pIndex; - fts5TokendataIterDelete(pIter->pTokenDataIter); - fts5MultiIterFree(pIter); - sqlite3Fts5IndexCloseReader(pIndex); + Fts5Index *pIndex = ((Fts5Iter*)pIndexIter)->pIndex; + fts5IterClose(pIndexIter); + fts5IndexReturn(pIndex); } } @@ -248327,7 +251031,7 @@ static int fts5QueryCksum( rc = sqlite3Fts5IterNext(pIter); } } - sqlite3Fts5IterClose(pIter); + fts5IterClose(pIter); *pCksum = cksum; return rc; @@ -249336,7 +252040,7 @@ static int fts5structConnectMethod( /* ** We must have a single struct=? constraint that will be passed through -** into the xFilter method. If there is no valid stmt=? constraint, +** into the xFilter method. If there is no valid struct=? constraint, ** then return an SQLITE_CONSTRAINT error. */ static int fts5structBestIndexMethod( @@ -249678,8 +252382,18 @@ struct Fts5Global { Fts5TokenizerModule *pTok; /* First in list of all tokenizer modules */ Fts5TokenizerModule *pDfltTok; /* Default tokenizer module */ Fts5Cursor *pCsr; /* First in list of all open cursors */ + u32 aLocaleHdr[4]; }; +/* +** Size of header on fts5_locale() values. And macro to access a buffer +** containing a copy of the header from an Fts5Config pointer. +*/ +#define FTS5_LOCALE_HDR_SIZE ((int)sizeof( ((Fts5Global*)0)->aLocaleHdr )) +#define FTS5_LOCALE_HDR(pConfig) ((const u8*)(pConfig->pGlobal->aLocaleHdr)) + +#define FTS5_INSTTOKEN_SUBTYPE 73 + /* ** Each auxiliary function registered with the FTS5 module is represented ** by an object of the following type. All such objects are stored as part @@ -249698,11 +252412,28 @@ struct Fts5Auxiliary { ** Each tokenizer module registered with the FTS5 module is represented ** by an object of the following type. All such objects are stored as part ** of the Fts5Global.pTok list. +** +** bV2Native: +** True if the tokenizer was registered using xCreateTokenizer_v2(), false +** for xCreateTokenizer(). If this variable is true, then x2 is populated +** with the routines as supplied by the caller and x1 contains synthesized +** wrapper routines. In this case the user-data pointer passed to +** x1.xCreate should be a pointer to the Fts5TokenizerModule structure, +** not a copy of pUserData. +** +** Of course, if bV2Native is false, then x1 contains the real routines and +** x2 the synthesized ones. In this case a pointer to the Fts5TokenizerModule +** object should be passed to x2.xCreate. +** +** The synthesized wrapper routines are necessary for xFindTokenizer(_v2) +** calls. 
*/ struct Fts5TokenizerModule { char *zName; /* Name of tokenizer */ void *pUserData; /* User pointer passed to xCreate() */ - fts5_tokenizer x; /* Tokenizer functions */ + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ void (*xDestroy)(void*); /* Destructor function */ Fts5TokenizerModule *pNext; /* Next registered tokenizer module */ }; @@ -249790,7 +252521,7 @@ struct Fts5Cursor { Fts5Auxiliary *pAux; /* Currently executing extension function */ Fts5Auxdata *pAuxdata; /* First in linked list of saved aux-data */ - /* Cache used by auxiliary functions xInst() and xInstCount() */ + /* Cache used by auxiliary API functions xInst() and xInstCount() */ Fts5PoslistReader *aInstIter; /* One for each phrase */ int nInstAlloc; /* Size of aInst[] array (entries / 3) */ int nInstCount; /* Number of phrase instances */ @@ -249901,10 +252632,16 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ #endif /* -** Return true if pTab is a contentless table. +** Return true if pTab is a contentless table. If parameter bIncludeUnindexed +** is true, this includes contentless tables that store UNINDEXED columns +** only. */ -static int fts5IsContentless(Fts5FullTable *pTab){ - return pTab->p.pConfig->eContent==FTS5_CONTENT_NONE; +static int fts5IsContentless(Fts5FullTable *pTab, int bIncludeUnindexed){ + int eContent = pTab->p.pConfig->eContent; + return ( + eContent==FTS5_CONTENT_NONE + || (bIncludeUnindexed && eContent==FTS5_CONTENT_UNINDEXED) + ); } /* @@ -249972,8 +252709,12 @@ static int fts5InitVtab( assert( (rc==SQLITE_OK && *pzErr==0) || pConfig==0 ); } if( rc==SQLITE_OK ){ + pConfig->pzErrmsg = pzErr; pTab->p.pConfig = pConfig; pTab->pGlobal = pGlobal; + if( bCreate || sqlite3Fts5TokenizerPreload(&pConfig->t) ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } } /* Open the index sub-system */ @@ -249995,11 +252736,7 @@ static int fts5InitVtab( /* Load the initial configuration */ if( rc==SQLITE_OK ){ - assert( pConfig->pzErrmsg==0 ); - pConfig->pzErrmsg = pzErr; - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); - sqlite3Fts5IndexRollback(pTab->p.pIndex); - pConfig->pzErrmsg = 0; + rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie-1); } if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){ @@ -250009,6 +252746,7 @@ static int fts5InitVtab( rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); } + if( pConfig ) pConfig->pzErrmsg = 0; if( rc!=SQLITE_OK ){ fts5FreeVtab(pTab); pTab = 0; @@ -250076,10 +252814,10 @@ static int fts5UsePatternMatch( ){ assert( FTS5_PATTERN_GLOB==SQLITE_INDEX_CONSTRAINT_GLOB ); assert( FTS5_PATTERN_LIKE==SQLITE_INDEX_CONSTRAINT_LIKE ); - if( pConfig->ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ + if( pConfig->t.ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ return 1; } - if( pConfig->ePattern==FTS5_PATTERN_LIKE + if( pConfig->t.ePattern==FTS5_PATTERN_LIKE && (p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB) ){ return 1; @@ -250126,10 +252864,10 @@ static int fts5UsePatternMatch( ** This function ensures that there is at most one "r" or "=". And that if ** there exists an "=" then there is no "<" or ">". ** -** Costs are assigned as follows: +** If an unusable MATCH operator is present in the WHERE clause, then +** SQLITE_CONSTRAINT is returned. ** -** a) If an unusable MATCH operator is present in the WHERE clause, the -** cost is unconditionally set to 1e50 (a really big number). 
+** Costs are assigned as follows: ** ** a) If a MATCH operator is present, the cost depends on the other ** constraints also present. As follows: @@ -250162,7 +252900,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ int bSeenEq = 0; int bSeenGt = 0; int bSeenLt = 0; - int bSeenMatch = 0; + int nSeenMatch = 0; int bSeenRank = 0; @@ -250193,18 +252931,16 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* A MATCH operator or equivalent */ if( p->usable==0 || iCol<0 ){ /* As there exists an unusable MATCH constraint this is an - ** unusable plan. Set a prohibitively high cost. */ - pInfo->estimatedCost = 1e50; - assert( iIdxStr < pInfo->nConstraint*6 + 1 ); + ** unusable plan. Return SQLITE_CONSTRAINT. */ idxStr[iIdxStr] = 0; - return SQLITE_OK; + return SQLITE_CONSTRAINT; }else{ if( iCol==nCol+1 ){ if( bSeenRank ) continue; idxStr[iIdxStr++] = 'r'; bSeenRank = 1; - }else if( iCol>=0 ){ - bSeenMatch = 1; + }else{ + nSeenMatch++; idxStr[iIdxStr++] = 'M'; sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol); idxStr += strlen(&idxStr[iIdxStr]); @@ -250221,6 +252957,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ idxStr += strlen(&idxStr[iIdxStr]); pInfo->aConstraintUsage[i].argvIndex = ++iCons; assert( idxStr[iIdxStr]=='\0' ); + nSeenMatch++; }else if( bSeenEq==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 ){ idxStr[iIdxStr++] = '='; bSeenEq = 1; @@ -250257,7 +252994,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ */ if( pInfo->nOrderBy==1 ){ int iSort = pInfo->aOrderBy[0].iColumn; - if( iSort==(pConfig->nCol+1) && bSeenMatch ){ + if( iSort==(pConfig->nCol+1) && nSeenMatch>0 ){ idxFlags |= FTS5_BI_ORDER_RANK; }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){ idxFlags |= FTS5_BI_ORDER_ROWID; @@ -250272,14 +253009,17 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* Calculate the estimated cost based on the flags set in idxFlags. */ if( bSeenEq ){ - pInfo->estimatedCost = bSeenMatch ? 100.0 : 10.0; - if( bSeenMatch==0 ) fts5SetUniqueFlag(pInfo); + pInfo->estimatedCost = nSeenMatch ? 1000.0 : 10.0; + if( nSeenMatch==0 ) fts5SetUniqueFlag(pInfo); }else if( bSeenLt && bSeenGt ){ - pInfo->estimatedCost = bSeenMatch ? 500.0 : 250000.0; + pInfo->estimatedCost = nSeenMatch ? 5000.0 : 250000.0; }else if( bSeenLt || bSeenGt ){ - pInfo->estimatedCost = bSeenMatch ? 750.0 : 750000.0; + pInfo->estimatedCost = nSeenMatch ? 7500.0 : 750000.0; }else{ - pInfo->estimatedCost = bSeenMatch ? 1000.0 : 1000000.0; + pInfo->estimatedCost = nSeenMatch ? 10000.0 : 1000000.0; + } + for(i=1; iestimatedCost *= 0.4; } pInfo->idxNum = idxFlags; @@ -250555,6 +253295,7 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){ } }else{ rc = SQLITE_OK; + CsrFlagSet(pCsr, FTS5CSR_REQUIRE_DOCSIZE); } break; } @@ -250584,7 +253325,7 @@ static int fts5PrepareStatement( rc = sqlite3_prepare_v3(pConfig->db, zSql, -1, SQLITE_PREPARE_PERSISTENT, &pRet, 0); if( rc!=SQLITE_OK ){ - *pConfig->pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(pConfig->db)); + sqlite3Fts5ConfigErrmsg(pConfig, "%s", sqlite3_errmsg(pConfig->db)); } sqlite3_free(zSql); } @@ -250808,6 +253549,145 @@ static i64 fts5GetRowidLimit(sqlite3_value *pVal, i64 iDefault){ return iDefault; } +/* +** Set the error message on the virtual table passed as the first argument. +*/ +static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){ + va_list ap; /* ... 
printf arguments */ + va_start(ap, zFormat); + sqlite3_free(p->p.base.zErrMsg); + p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap); + va_end(ap); +} + +/* +** Arrange for subsequent calls to sqlite3Fts5Tokenize() to use the locale +** specified by pLocale/nLocale. The buffer indicated by pLocale must remain +** valid until after the final call to sqlite3Fts5Tokenize() that will use +** the locale. +*/ +static void sqlite3Fts5SetLocale( + Fts5Config *pConfig, + const char *zLocale, + int nLocale +){ + Fts5TokenizerConfig *pT = &pConfig->t; + pT->pLocale = zLocale; + pT->nLocale = nLocale; +} + +/* +** Clear any locale configured by an earlier call to sqlite3Fts5SetLocale(). +*/ +static void sqlite3Fts5ClearLocale(Fts5Config *pConfig){ + sqlite3Fts5SetLocale(pConfig, 0, 0); +} + +/* +** Return true if the value passed as the only argument is an +** fts5_locale() value. +*/ +static int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal){ + int ret = 0; + if( sqlite3_value_type(pVal)==SQLITE_BLOB ){ + /* Call sqlite3_value_bytes() after sqlite3_value_blob() in this case. + ** If the blob was created using zeroblob(), then sqlite3_value_blob() + ** may call malloc(). If this malloc() fails, then the values returned + ** by both value_blob() and value_bytes() will be 0. If value_bytes() were + ** called first, then the NULL pointer returned by value_blob() might + ** be dereferenced. */ + const u8 *pBlob = sqlite3_value_blob(pVal); + int nBlob = sqlite3_value_bytes(pVal); + if( nBlob>FTS5_LOCALE_HDR_SIZE + && 0==memcmp(pBlob, FTS5_LOCALE_HDR(pConfig), FTS5_LOCALE_HDR_SIZE) + ){ + ret = 1; + } + } + return ret; +} + +/* +** Value pVal is guaranteed to be an fts5_locale() value, according to +** sqlite3Fts5IsLocaleValue(). This function extracts the text and locale +** from the value and returns them separately. +** +** If successful, SQLITE_OK is returned and (*ppText) and (*ppLoc) set +** to point to buffers containing the text and locale, as utf-8, +** respectively. In this case output parameters (*pnText) and (*pnLoc) are +** set to the sizes in bytes of these two buffers. +** +** Or, if an error occurs, then an SQLite error code is returned. The final +** value of the four output parameters is undefined in this case. +*/ +static int sqlite3Fts5DecodeLocaleValue( + sqlite3_value *pVal, + const char **ppText, + int *pnText, + const char **ppLoc, + int *pnLoc +){ + const char *p = sqlite3_value_blob(pVal); + int n = sqlite3_value_bytes(pVal); + int nLoc = 0; + + assert( sqlite3_value_type(pVal)==SQLITE_BLOB ); + assert( n>FTS5_LOCALE_HDR_SIZE ); + + for(nLoc=FTS5_LOCALE_HDR_SIZE; p[nLoc]; nLoc++){ + if( nLoc==(n-1) ){ + return SQLITE_MISMATCH; + } + } + *ppLoc = &p[FTS5_LOCALE_HDR_SIZE]; + *pnLoc = nLoc - FTS5_LOCALE_HDR_SIZE; + + *ppText = &p[nLoc+1]; + *pnText = n - nLoc - 1; + return SQLITE_OK; +} + +/* +** Argument pVal is the text of a full-text search expression. It may or +** may not have been wrapped by fts5_locale(). This function extracts +** the text of the expression, and sets output variable (*pzText) to +** point to a nul-terminated buffer containing the expression. +** +** If pVal was an fts5_locale() value, then sqlite3Fts5SetLocale() is called +** to set the tokenizer to use the specified locale. +** +** If output variable (*pbFreeAndReset) is set to true, then the caller +** is required to (a) call sqlite3Fts5ClearLocale() to reset the tokenizer +** locale, and (b) call sqlite3_free() to free (*pzText). 
+*/ +static int fts5ExtractExprText( + Fts5Config *pConfig, /* Fts5 configuration */ + sqlite3_value *pVal, /* Value to extract expression text from */ + char **pzText, /* OUT: nul-terminated buffer of text */ + int *pbFreeAndReset /* OUT: Free (*pzText) and clear locale */ +){ + int rc = SQLITE_OK; + + if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + *pzText = sqlite3Fts5Mprintf(&rc, "%.*s", nText, pText); + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + } + *pbFreeAndReset = 1; + }else{ + *pzText = (char*)sqlite3_value_text(pVal); + *pbFreeAndReset = 0; + } + + return rc; +} + + /* ** This is the xFilter interface for the virtual table. See ** the virtual table xFilter method documentation for additional @@ -250838,17 +253718,12 @@ static int fts5FilterMethod( sqlite3_value *pRowidGe = 0; /* rowid >= ? expression (or NULL) */ int iCol; /* Column on LHS of MATCH operator */ char **pzErrmsg = pConfig->pzErrmsg; + int bPrefixInsttoken = pConfig->bPrefixInsttoken; int i; int iIdxStr = 0; Fts5Expr *pExpr = 0; - if( pConfig->bLock ){ - pTab->p.base.zErrMsg = sqlite3_mprintf( - "recursively defined fts5 content table" - ); - return SQLITE_ERROR; - } - + assert( pConfig->bLock==0 ); if( pCsr->ePlan ){ fts5FreeCursorComponents(pCsr); memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan-(u8*)pCsr)); @@ -250872,8 +253747,17 @@ static int fts5FilterMethod( pRank = apVal[i]; break; case 'M': { - const char *zText = (const char*)sqlite3_value_text(apVal[i]); + char *zText = 0; + int bFreeAndReset = 0; + int bInternal = 0; + + rc = fts5ExtractExprText(pConfig, apVal[i], &zText, &bFreeAndReset); + if( rc!=SQLITE_OK ) goto filter_out; if( zText==0 ) zText = ""; + if( sqlite3_value_subtype(apVal[i])==FTS5_INSTTOKEN_SUBTYPE ){ + pConfig->bPrefixInsttoken = 1; + } + iCol = 0; do{ iCol = iCol*10 + (idxStr[iIdxStr]-'0'); @@ -250885,7 +253769,7 @@ static int fts5FilterMethod( ** indicates that the MATCH expression is not a full text query, ** but a request for an internal parameter. 
*/ rc = fts5SpecialMatch(pTab, pCsr, &zText[1]); - goto filter_out; + bInternal = 1; }else{ char **pzErr = &pTab->p.base.zErrMsg; rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, pzErr); @@ -250893,9 +253777,15 @@ static int fts5FilterMethod( rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr); pExpr = 0; } - if( rc!=SQLITE_OK ) goto filter_out; } + if( bFreeAndReset ){ + sqlite3_free(zText); + sqlite3Fts5ClearLocale(pConfig); + } + + if( bInternal || rc!=SQLITE_OK ) goto filter_out; + break; } case 'L': @@ -250983,9 +253873,7 @@ static int fts5FilterMethod( } } }else if( pConfig->zContent==0 ){ - *pConfig->pzErrmsg = sqlite3_mprintf( - "%s: table does not support scanning", pConfig->zName - ); + fts5SetVtabError(pTab,"%s: table does not support scanning",pConfig->zName); rc = SQLITE_ERROR; }else{ /* This is either a full-table scan (ePlan==FTS5_PLAN_SCAN) or a lookup @@ -251009,6 +253897,7 @@ static int fts5FilterMethod( filter_out: sqlite3Fts5ExprFree(pExpr); pConfig->pzErrmsg = pzErrmsg; + pConfig->bPrefixInsttoken = bPrefixInsttoken; return rc; } @@ -251028,9 +253917,13 @@ static i64 fts5CursorRowid(Fts5Cursor *pCsr){ assert( pCsr->ePlan==FTS5_PLAN_MATCH || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH || pCsr->ePlan==FTS5_PLAN_SOURCE + || pCsr->ePlan==FTS5_PLAN_SCAN + || pCsr->ePlan==FTS5_PLAN_ROWID ); if( pCsr->pSorter ){ return pCsr->pSorter->iRowid; + }else if( pCsr->ePlan>=FTS5_PLAN_SCAN ){ + return sqlite3_column_int64(pCsr->pStmt, 0); }else{ return sqlite3Fts5ExprRowid(pCsr->pExpr); } @@ -251047,25 +253940,16 @@ static int fts5RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ int ePlan = pCsr->ePlan; assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 ); - switch( ePlan ){ - case FTS5_PLAN_SPECIAL: - *pRowid = 0; - break; - - case FTS5_PLAN_SOURCE: - case FTS5_PLAN_MATCH: - case FTS5_PLAN_SORTED_MATCH: - *pRowid = fts5CursorRowid(pCsr); - break; - - default: - *pRowid = sqlite3_column_int64(pCsr->pStmt, 0); - break; + if( ePlan==FTS5_PLAN_SPECIAL ){ + *pRowid = 0; + }else{ + *pRowid = fts5CursorRowid(pCsr); } return SQLITE_OK; } + /* ** If the cursor requires seeking (bSeekRequired flag is set), seek it. ** Return SQLITE_OK if no error occurs, or an SQLite error code otherwise. @@ -251102,8 +253986,13 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ rc = sqlite3_reset(pCsr->pStmt); if( rc==SQLITE_OK ){ rc = FTS5_CORRUPT; + fts5SetVtabError((Fts5FullTable*)pTab, + "fts5: missing row %lld from content table %s", + fts5CursorRowid(pCsr), + pTab->pConfig->zContent + ); }else if( pTab->pConfig->pzErrmsg ){ - *pTab->pConfig->pzErrmsg = sqlite3_mprintf( + fts5SetVtabError((Fts5FullTable*)pTab, "%s", sqlite3_errmsg(pTab->pConfig->db) ); } @@ -251112,14 +254001,6 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ return rc; } -static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){ - va_list ap; /* ... printf arguments */ - va_start(ap, zFormat); - assert( p->p.base.zErrMsg==0 ); - p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap); - va_end(ap); -} - /* ** This function is called to handle an FTS INSERT command. 
In other words, ** an INSERT statement of the form: @@ -251157,7 +254038,7 @@ static int fts5SpecialInsert( } bLoadConfig = 1; }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){ - if( pConfig->eContent==FTS5_CONTENT_NONE ){ + if( fts5IsContentless(pTab, 1) ){ fts5SetVtabError(pTab, "'rebuild' may not be used with a contentless fts5 table" ); @@ -251213,7 +254094,7 @@ static int fts5SpecialDelete( int eType1 = sqlite3_value_type(apVal[1]); if( eType1==SQLITE_INTEGER ){ sqlite3_int64 iDel = sqlite3_value_int64(apVal[1]); - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2]); + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2], 0); } return rc; } @@ -251226,7 +254107,7 @@ static void fts5StorageInsert( ){ int rc = *pRc; if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, piRowid); + rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, 0, apVal, piRowid); } if( rc==SQLITE_OK ){ rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal, *piRowid); @@ -251234,6 +254115,67 @@ static void fts5StorageInsert( *pRc = rc; } +/* +** +** This function is called when the user attempts an UPDATE on a contentless +** table. Parameter bRowidModified is true if the UPDATE statement modifies +** the rowid value. Parameter apVal[] contains the new values for each user +** defined column of the fts5 table. pConfig is the configuration object of the +** table being updated (guaranteed to be contentless). The contentless_delete=1 +** and contentless_unindexed=1 options may or may not be set. +** +** This function returns SQLITE_OK if the UPDATE can go ahead, or an SQLite +** error code if it cannot. In this case an error message is also loaded into +** pConfig. Output parameter (*pbContent) is set to true if the caller should +** update the %_content table only - not the FTS index or any other shadow +** table. This occurs when an UPDATE modifies only UNINDEXED columns of the +** table. +** +** An UPDATE may proceed if: +** +** * The only columns modified are UNINDEXED columns, or +** +** * The contentless_delete=1 option was specified and all of the indexed +** columns (not a subset) have been modified. +*/ +static int fts5ContentlessUpdate( + Fts5Config *pConfig, + sqlite3_value **apVal, + int bRowidModified, + int *pbContent +){ + int ii; + int bSeenIndex = 0; /* Have seen modified indexed column */ + int bSeenIndexNC = 0; /* Have seen unmodified indexed column */ + int rc = SQLITE_OK; + + for(ii=0; iinCol; ii++){ + if( pConfig->abUnindexed[ii]==0 ){ + if( sqlite3_value_nochange(apVal[ii]) ){ + bSeenIndexNC++; + }else{ + bSeenIndex++; + } + } + } + + if( bSeenIndex==0 && bRowidModified==0 ){ + *pbContent = 1; + }else{ + if( bSeenIndexNC || pConfig->bContentlessDelete==0 ){ + rc = SQLITE_ERROR; + sqlite3Fts5ConfigErrmsg(pConfig, + (pConfig->bContentlessDelete ? + "%s a subset of columns on fts5 contentless-delete table: %s" : + "%s contentless fts5 table: %s") + , "cannot UPDATE", pConfig->zName + ); + } + } + + return rc; +} + /* ** This function is the implementation of the xUpdate callback used by ** FTS3 virtual tables. It is invoked by SQLite each time a row is to be @@ -251258,7 +254200,6 @@ static int fts5UpdateMethod( Fts5Config *pConfig = pTab->p.pConfig; int eType0; /* value_type() of apVal[0] */ int rc = SQLITE_OK; /* Return code */ - int bUpdateOrDelete = 0; /* A transaction must be open when this is called. 
*/ assert( pTab->ts.eState==1 || pTab->ts.eState==2 ); @@ -251270,7 +254211,7 @@ static int fts5UpdateMethod( ); assert( pTab->p.pConfig->pzErrmsg==0 ); if( pConfig->pgsz==0 ){ - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie); if( rc!=SQLITE_OK ) return rc; } @@ -251295,7 +254236,6 @@ static int fts5UpdateMethod( rc = SQLITE_ERROR; }else{ rc = fts5SpecialDelete(pTab, apVal); - bUpdateOrDelete = 1; } }else{ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]); @@ -251320,88 +254260,104 @@ static int fts5UpdateMethod( assert( eType0==SQLITE_INTEGER || eType0==SQLITE_NULL ); assert( nArg!=1 || eType0==SQLITE_INTEGER ); - /* Filter out attempts to run UPDATE or DELETE on contentless tables. - ** This is not suported. Except - they are both supported if the CREATE - ** VIRTUAL TABLE statement contained "contentless_delete=1". */ - if( eType0==SQLITE_INTEGER - && pConfig->eContent==FTS5_CONTENT_NONE - && pConfig->bContentlessDelete==0 - ){ - pTab->p.base.zErrMsg = sqlite3_mprintf( - "cannot %s contentless fts5 table: %s", - (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName - ); - rc = SQLITE_ERROR; - } - /* DELETE */ - else if( nArg==1 ){ - i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0); - bUpdateOrDelete = 1; + if( nArg==1 ){ + /* It is only possible to DELETE from a contentless table if the + ** contentless_delete=1 flag is set. */ + if( fts5IsContentless(pTab, 1) && pConfig->bContentlessDelete==0 ){ + fts5SetVtabError(pTab, + "cannot DELETE from contentless fts5 table: %s", pConfig->zName + ); + rc = SQLITE_ERROR; + }else{ + i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0, 0); + } } /* INSERT or UPDATE */ else{ int eType1 = sqlite3_value_numeric_type(apVal[1]); - if( eType1!=SQLITE_INTEGER && eType1!=SQLITE_NULL ){ - rc = SQLITE_MISMATCH; + /* It is an error to write an fts5_locale() value to a table without + ** the locale=1 option. */ + if( pConfig->bLocale==0 ){ + int ii; + for(ii=0; iinCol; ii++){ + sqlite3_value *pVal = apVal[ii+2]; + if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + fts5SetVtabError(pTab, "fts5_locale() requires locale=1"); + rc = SQLITE_MISMATCH; + goto update_out; + } + } } - else if( eType0!=SQLITE_INTEGER ){ + if( eType0!=SQLITE_INTEGER ){ /* An INSERT statement. If the conflict-mode is REPLACE, first remove ** the current entry (if any). */ if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); - bUpdateOrDelete = 1; + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); } /* UPDATE */ else{ + Fts5Storage *pStorage = pTab->pStorage; i64 iOld = sqlite3_value_int64(apVal[0]); /* Old rowid */ i64 iNew = sqlite3_value_int64(apVal[1]); /* New rowid */ - if( eType1==SQLITE_INTEGER && iOld!=iNew ){ + int bContent = 0; /* Content only update */ + + /* If this is a contentless table (including contentless_unindexed=1 + ** tables), check if the UPDATE may proceed. 
*/ + if( fts5IsContentless(pTab, 1) ){ + rc = fts5ContentlessUpdate(pConfig, &apVal[2], iOld!=iNew, &bContent); + if( rc!=SQLITE_OK ) goto update_out; + } + + if( eType1!=SQLITE_INTEGER ){ + rc = SQLITE_MISMATCH; + }else if( iOld!=iNew ){ + assert( bContent==0 ); if( eConflict==SQLITE_REPLACE ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iNew, 0, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); }else{ - rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, pRowid); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 0, apVal, pRowid); + } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 0); } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal,*pRowid); + rc = sqlite3Fts5StorageIndexInsert(pStorage, apVal, *pRowid); } } + }else if( bContent ){ + /* This occurs when an UPDATE on a contentless table affects *only* + ** UNINDEXED columns. This is a no-op for contentless_unindexed=0 + ** tables, or a write to the %_content table only for =1 tables. */ + assert( fts5IsContentless(pTab, 1) ); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 1, apVal, pRowid); + } }else{ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); fts5StorageInsert(&rc, pTab, apVal, pRowid); } - bUpdateOrDelete = 1; + sqlite3Fts5StorageReleaseDeleteRow(pStorage); } } } - if( rc==SQLITE_OK - && bUpdateOrDelete - && pConfig->bSecureDelete - && pConfig->iVersion==FTS5_CURRENT_VERSION - ){ - rc = sqlite3Fts5StorageConfigValue( - pTab->pStorage, "version", 0, FTS5_CURRENT_VERSION_SECUREDELETE - ); - if( rc==SQLITE_OK ){ - pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; - } - } - + update_out: pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -251423,9 +254379,11 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){ ** Implementation of xBegin() method. */ static int fts5BeginMethod(sqlite3_vtab *pVtab){ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); - fts5NewTransaction((Fts5FullTable*)pVtab); - return SQLITE_OK; + int rc = fts5NewTransaction((Fts5FullTable*)pVtab); + if( rc==SQLITE_OK ){ + fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); + } + return rc; } /* @@ -251448,6 +254406,7 @@ static int fts5RollbackMethod(sqlite3_vtab *pVtab){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; fts5CheckTransactionState(pTab, FTS5_ROLLBACK, 0); rc = sqlite3Fts5StorageRollback(pTab->pStorage); + pTab->p.pConfig->pgsz = 0; return rc; } @@ -251479,17 +254438,40 @@ static int fts5ApiRowCount(Fts5Context *pCtx, i64 *pnRow){ return sqlite3Fts5StorageRowCount(pTab->pStorage, pnRow); } -static int fts5ApiTokenize( +/* +** Implementation of xTokenize_v2() API. 
+*/ +static int fts5ApiTokenize_v2( Fts5Context *pCtx, const char *pText, int nText, + const char *pLoc, int nLoc, void *pUserData, int (*xToken)(void*, int, const char*, int, int, int) ){ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); - return sqlite3Fts5Tokenize( - pTab->pConfig, FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken + int rc = SQLITE_OK; + + sqlite3Fts5SetLocale(pTab->pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pTab->pConfig, + FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken ); + sqlite3Fts5SetLocale(pTab->pConfig, 0, 0); + + return rc; +} + +/* +** Implementation of xTokenize() API. This is just xTokenize_v2() with NULL/0 +** passed as the locale. +*/ +static int fts5ApiTokenize( + Fts5Context *pCtx, + const char *pText, int nText, + void *pUserData, + int (*xToken)(void*, int, const char*, int, int, int) +){ + return fts5ApiTokenize_v2(pCtx, pText, nText, 0, 0, pUserData, xToken); } static int fts5ApiPhraseCount(Fts5Context *pCtx){ @@ -251502,6 +254484,49 @@ static int fts5ApiPhraseSize(Fts5Context *pCtx, int iPhrase){ return sqlite3Fts5ExprPhraseSize(pCsr->pExpr, iPhrase); } +/* +** Argument pStmt is an SQL statement of the type used by Fts5Cursor. This +** function extracts the text value of column iCol of the current row. +** Additionally, if there is an associated locale, it invokes +** sqlite3Fts5SetLocale() to configure the tokenizer. In all cases the caller +** should invoke sqlite3Fts5ClearLocale() to clear the locale at some point +** after this function returns. +** +** If successful, (*ppText) is set to point to a buffer containing the text +** value as utf-8 and SQLITE_OK returned. (*pnText) is set to the size of that +** buffer in bytes. It is not guaranteed to be nul-terminated. If an error +** occurs, an SQLite error code is returned. The final values of the two +** output parameters are undefined in this case. 
+*/ +static int fts5TextFromStmt( + Fts5Config *pConfig, + sqlite3_stmt *pStmt, + int iCol, + const char **ppText, + int *pnText +){ + sqlite3_value *pVal = sqlite3_column_value(pStmt, iCol+1); + const char *pLoc = 0; + int nLoc = 0; + int rc = SQLITE_OK; + + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, ppText, pnText, &pLoc, &nLoc); + }else{ + *ppText = (const char*)sqlite3_value_text(pVal); + *pnText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pConfig->eContent==FTS5_CONTENT_NORMAL ){ + pLoc = (const char*)sqlite3_column_text(pStmt, iCol+1+pConfig->nCol); + nLoc = sqlite3_column_bytes(pStmt, iCol+1+pConfig->nCol); + } + } + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + return rc; +} + static int fts5ApiColumnText( Fts5Context *pCtx, int iCol, @@ -251511,28 +254536,35 @@ static int fts5ApiColumnText( int rc = SQLITE_OK; Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); if( iCol<0 || iCol>=pTab->pConfig->nCol ){ rc = SQLITE_RANGE; - }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) - || pCsr->ePlan==FTS5_PLAN_SPECIAL - ){ + }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab), 0) ){ *pz = 0; *pn = 0; }else{ rc = fts5SeekCursor(pCsr, 0); if( rc==SQLITE_OK ){ - *pz = (const char*)sqlite3_column_text(pCsr->pStmt, iCol+1); - *pn = sqlite3_column_bytes(pCsr->pStmt, iCol+1); + rc = fts5TextFromStmt(pTab->pConfig, pCsr->pStmt, iCol, pz, pn); + sqlite3Fts5ClearLocale(pTab->pConfig); } } return rc; } +/* +** This is called by various API functions - xInst, xPhraseFirst, +** xPhraseFirstColumn etc. - to obtain the position list for phrase iPhrase +** of the current row. This function works for both detail=full tables (in +** which case the position-list was read from the fts index) or for other +** detail= modes if the row content is available. 
+*/ static int fts5CsrPoslist( - Fts5Cursor *pCsr, - int iPhrase, - const u8 **pa, - int *pn + Fts5Cursor *pCsr, /* Fts5 cursor object */ + int iPhrase, /* Phrase to find position list for */ + const u8 **pa, /* OUT: Pointer to position list buffer */ + int *pn /* OUT: Size of (*pa) in bytes */ ){ Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig; int rc = SQLITE_OK; @@ -251540,20 +254572,32 @@ static int fts5CsrPoslist( if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){ rc = SQLITE_RANGE; + }else if( pConfig->eDetail!=FTS5_DETAIL_FULL + && fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1) + ){ + *pa = 0; + *pn = 0; + return SQLITE_OK; }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){ Fts5PoslistPopulator *aPopulator; int i; + aPopulator = sqlite3Fts5ExprClearPoslists(pCsr->pExpr, bLive); if( aPopulator==0 ) rc = SQLITE_NOMEM; + if( rc==SQLITE_OK ){ + rc = fts5SeekCursor(pCsr, 0); + } for(i=0; inCol && rc==SQLITE_OK; i++){ - int n; const char *z; - rc = fts5ApiColumnText((Fts5Context*)pCsr, i, &z, &n); + const char *z = 0; + int n = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n); if( rc==SQLITE_OK ){ rc = sqlite3Fts5ExprPopulatePoslists( pConfig, pCsr->pExpr, aPopulator, i, z, n ); } + sqlite3Fts5ClearLocale(pConfig); } sqlite3_free(aPopulator); @@ -251578,7 +254622,6 @@ static int fts5CsrPoslist( *pn = 0; } - return rc; } @@ -251647,7 +254690,8 @@ static int fts5CacheInstArray(Fts5Cursor *pCsr){ aInst[0] = iBest; aInst[1] = FTS5_POS2COLUMN(aIter[iBest].iPos); aInst[2] = FTS5_POS2OFFSET(aIter[iBest].iPos); - if( aInst[1]<0 || aInst[1]>=nCol ){ + assert( aInst[1]>=0 ); + if( aInst[1]>=nCol ){ rc = FTS5_CORRUPT; break; } @@ -251725,7 +254769,7 @@ static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){ if( pConfig->bColumnsize ){ i64 iRowid = fts5CursorRowid(pCsr); rc = sqlite3Fts5StorageDocsize(pTab->pStorage, iRowid, pCsr->aColumnSize); - }else if( pConfig->zContent==0 ){ + }else if( !pConfig->zContent || pConfig->eContent==FTS5_CONTENT_UNINDEXED ){ int i; for(i=0; inCol; i++){ if( pConfig->abUnindexed[i]==0 ){ @@ -251734,17 +254778,19 @@ static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){ } }else{ int i; + rc = fts5SeekCursor(pCsr, 0); for(i=0; rc==SQLITE_OK && inCol; i++){ if( pConfig->abUnindexed[i]==0 ){ - const char *z; int n; - void *p = (void*)(&pCsr->aColumnSize[i]); + const char *z = 0; + int n = 0; pCsr->aColumnSize[i] = 0; - rc = fts5ApiColumnText(pCtx, i, &z, &n); + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5Tokenize( - pConfig, FTS5_TOKENIZE_AUX, z, n, p, fts5ColumnSizeCb + rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_AUX, + z, n, (void*)&pCsr->aColumnSize[i], fts5ColumnSizeCb ); } + sqlite3Fts5ClearLocale(pConfig); } } } @@ -251824,11 +254870,10 @@ static void *fts5ApiGetAuxdata(Fts5Context *pCtx, int bClear){ } static void fts5ApiPhraseNext( - Fts5Context *pUnused, + Fts5Context *pCtx, Fts5PhraseIter *pIter, int *piCol, int *piOff ){ - UNUSED_PARAM(pUnused); if( pIter->a>=pIter->b ){ *piCol = -1; *piOff = -1; @@ -251836,8 +254881,12 @@ static void fts5ApiPhraseNext( int iVal; pIter->a += fts5GetVarint32(pIter->a, iVal); if( iVal==1 ){ + /* Avoid returning a (*piCol) value that is too large for the table, + ** even if the position-list is corrupt. The caller might not be + ** expecting it. 
*/ + int nCol = ((Fts5Table*)(((Fts5Cursor*)pCtx)->base.pVtab))->pConfig->nCol; pIter->a += fts5GetVarint32(pIter->a, iVal); - *piCol = iVal; + *piCol = (iVal>=nCol ? nCol-1 : iVal); *piOff = 0; pIter->a += fts5GetVarint32(pIter->a, iVal); } @@ -251987,8 +255036,48 @@ static int fts5ApiQueryPhrase(Fts5Context*, int, void*, int(*)(const Fts5ExtensionApi*, Fts5Context*, void*) ); +/* +** The xColumnLocale() API. +*/ +static int fts5ApiColumnLocale( + Fts5Context *pCtx, + int iCol, + const char **pzLocale, + int *pnLocale +){ + int rc = SQLITE_OK; + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig; + + *pzLocale = 0; + *pnLocale = 0; + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); + if( iCol<0 || iCol>=pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( + pConfig->abUnindexed[iCol]==0 + && 0==fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1) + && pConfig->bLocale + ){ + rc = fts5SeekCursor(pCsr, 0); + if( rc==SQLITE_OK ){ + const char *zDummy = 0; + int nDummy = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &zDummy, &nDummy); + if( rc==SQLITE_OK ){ + *pzLocale = pConfig->t.pLocale; + *pnLocale = pConfig->t.nLocale; + } + sqlite3Fts5ClearLocale(pConfig); + } + } + + return rc; +} + static const Fts5ExtensionApi sFts5Api = { - 3, /* iVersion */ + 4, /* iVersion */ fts5ApiUserData, fts5ApiColumnCount, fts5ApiRowCount, @@ -252009,7 +255098,9 @@ static const Fts5ExtensionApi sFts5Api = { fts5ApiPhraseFirstColumn, fts5ApiPhraseNextColumn, fts5ApiQueryToken, - fts5ApiInstToken + fts5ApiInstToken, + fts5ApiColumnLocale, + fts5ApiTokenize_v2 }; /* @@ -252060,6 +255151,7 @@ static void fts5ApiInvoke( sqlite3_value **argv ){ assert( pCsr->pAux==0 ); + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); pCsr->pAux = pAux; pAux->xFunc(&sFts5Api, (Fts5Context*)pCsr, context, argc, argv); pCsr->pAux = 0; @@ -252073,6 +255165,21 @@ static Fts5Cursor *fts5CursorFromCsrid(Fts5Global *pGlobal, i64 iCsrId){ return pCsr; } +/* +** Parameter zFmt is a printf() style formatting string. This function +** formats it using the trailing arguments and returns the result as +** an error message to the context passed as the first argument. +*/ +static void fts5ResultError(sqlite3_context *pCtx, const char *zFmt, ...){ + char *zErr = 0; + va_list ap; + va_start(ap, zFmt); + zErr = sqlite3_vmprintf(zFmt, ap); + sqlite3_result_error(pCtx, zErr, -1); + sqlite3_free(zErr); + va_end(ap); +} + static void fts5ApiCallback( sqlite3_context *context, int argc, @@ -252088,12 +255195,13 @@ static void fts5ApiCallback( iCsrId = sqlite3_value_int64(argv[0]); pCsr = fts5CursorFromCsrid(pAux->pGlobal, iCsrId); - if( pCsr==0 || pCsr->ePlan==0 ){ - char *zErr = sqlite3_mprintf("no such cursor: %lld", iCsrId); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); + if( pCsr==0 || (pCsr->ePlan==0 || pCsr->ePlan==FTS5_PLAN_SPECIAL) ){ + fts5ResultError(context, "no such cursor: %lld", iCsrId); }else{ + sqlite3_vtab *pTab = pCsr->base.pVtab; fts5ApiInvoke(pAux, pCsr, context, argc-1, &argv[1]); + sqlite3_free(pTab->zErrMsg); + pTab->zErrMsg = 0; } } @@ -252211,8 +255319,8 @@ static int fts5ColumnMethod( ** auxiliary function. */ sqlite3_result_int64(pCtx, pCsr->iCsrId); }else if( iCol==pConfig->nCol+1 ){ - /* The value of the "rank" column. 
*/ + if( pCsr->ePlan==FTS5_PLAN_SOURCE ){ fts5PoslistBlob(pCtx, pCsr); }else if( @@ -252223,20 +255331,32 @@ static int fts5ColumnMethod( fts5ApiInvoke(pCsr->pRank, pCsr, pCtx, pCsr->nRankArg, pCsr->apRankArg); } } - }else if( !fts5IsContentless(pTab) ){ - pConfig->pzErrmsg = &pTab->p.base.zErrMsg; - rc = fts5SeekCursor(pCsr, 1); - if( rc==SQLITE_OK ){ - sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); + }else{ + if( !sqlite3_vtab_nochange(pCtx) && pConfig->eContent!=FTS5_CONTENT_NONE ){ + pConfig->pzErrmsg = &pTab->p.base.zErrMsg; + rc = fts5SeekCursor(pCsr, 1); + if( rc==SQLITE_OK ){ + sqlite3_value *pVal = sqlite3_column_value(pCsr->pStmt, iCol+1); + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + const char *z = 0; + int n = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &z, &n); + if( rc==SQLITE_OK ){ + sqlite3_result_text(pCtx, z, n, SQLITE_TRANSIENT); + } + sqlite3Fts5ClearLocale(pConfig); + }else{ + sqlite3_result_value(pCtx, pVal); + } + } + + pConfig->pzErrmsg = 0; } - pConfig->pzErrmsg = 0; - }else if( pConfig->bContentlessDelete && sqlite3_vtab_nochange(pCtx) ){ - char *zErr = sqlite3_mprintf("cannot UPDATE a subset of " - "columns on fts5 contentless-delete table: %s", pConfig->zName - ); - sqlite3_result_error(pCtx, zErr, -1); - sqlite3_free(zErr); } + return rc; } @@ -252376,47 +255496,210 @@ static int fts5CreateAux( } /* -** Register a new tokenizer. This is the implementation of the -** fts5_api.xCreateTokenizer() method. +** This function is used by xCreateTokenizer_v2() and xCreateTokenizer(). +** It allocates and partially populates a new Fts5TokenizerModule object. +** The new object is already linked into the Fts5Global context before +** returning. +** +** If successful, SQLITE_OK is returned and a pointer to the new +** Fts5TokenizerModule object returned via output parameter (*ppNew). All +** that is required is for the caller to fill in the methods in +** Fts5TokenizerModule.x1 and x2, and to set Fts5TokenizerModule.bV2Native +** as appropriate. +** +** If an error occurs, an SQLite error code is returned and the final value +** of (*ppNew) undefined. */ -static int fts5CreateTokenizer( - fts5_api *pApi, /* Global context (one per db handle) */ +static int fts5NewTokenizerModule( + Fts5Global *pGlobal, /* Global context (one per db handle) */ const char *zName, /* Name of new function */ void *pUserData, /* User data for aux. 
function */ - fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ - void(*xDestroy)(void*) /* Destructor for pUserData */ + void(*xDestroy)(void*), /* Destructor for pUserData */ + Fts5TokenizerModule **ppNew ){ - Fts5Global *pGlobal = (Fts5Global*)pApi; - Fts5TokenizerModule *pNew; - sqlite3_int64 nName; /* Size of zName and its \0 terminator */ - sqlite3_int64 nByte; /* Bytes of space to allocate */ int rc = SQLITE_OK; + Fts5TokenizerModule *pNew; + sqlite3_int64 nName; /* Size of zName and its \0 terminator */ + sqlite3_int64 nByte; /* Bytes of space to allocate */ nName = strlen(zName) + 1; nByte = sizeof(Fts5TokenizerModule) + nName; - pNew = (Fts5TokenizerModule*)sqlite3_malloc64(nByte); + *ppNew = pNew = (Fts5TokenizerModule*)sqlite3Fts5MallocZero(&rc, nByte); if( pNew ){ - memset(pNew, 0, (size_t)nByte); pNew->zName = (char*)&pNew[1]; memcpy(pNew->zName, zName, nName); pNew->pUserData = pUserData; - pNew->x = *pTokenizer; pNew->xDestroy = xDestroy; pNew->pNext = pGlobal->pTok; pGlobal->pTok = pNew; if( pNew->pNext==0 ){ pGlobal->pDfltTok = pNew; } + } + + return rc; +} + +/* +** An instance of this type is used as the Fts5Tokenizer object for +** wrapper tokenizers - those that provide access to a v1 tokenizer via +** the fts5_tokenizer_v2 API, and those that provide access to a v2 tokenizer +** via the fts5_tokenizer API. +*/ +typedef struct Fts5VtoVTokenizer Fts5VtoVTokenizer; +struct Fts5VtoVTokenizer { + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ + Fts5Tokenizer *pReal; +}; + +/* +** Create a wrapper tokenizer. The context argument pCtx points to the +** Fts5TokenizerModule object. +*/ +static int fts5VtoVCreate( + void *pCtx, + const char **azArg, + int nArg, + Fts5Tokenizer **ppOut +){ + Fts5TokenizerModule *pMod = (Fts5TokenizerModule*)pCtx; + Fts5VtoVTokenizer *pNew = 0; + int rc = SQLITE_OK; + + pNew = (Fts5VtoVTokenizer*)sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + if( rc==SQLITE_OK ){ + pNew->x1 = pMod->x1; + pNew->x2 = pMod->x2; + pNew->bV2Native = pMod->bV2Native; + if( pMod->bV2Native ){ + rc = pMod->x2.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + }else{ + rc = pMod->x1.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + } + if( rc!=SQLITE_OK ){ + sqlite3_free(pNew); + pNew = 0; + } + } + + *ppOut = (Fts5Tokenizer*)pNew; + return rc; +} + +/* +** Delete an Fts5VtoVTokenizer wrapper tokenizer. +*/ +static void fts5VtoVDelete(Fts5Tokenizer *pTok){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + if( p ){ + if( p->bV2Native ){ + p->x2.xDelete(p->pReal); + }else{ + p->x1.xDelete(p->pReal); + } + sqlite3_free(p); + } +} + + +/* +** xTokenizer method for a wrapper tokenizer that offers the v1 interface +** (no support for locales). +*/ +static int fts5V1toV2Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native ); + return p->x2.xTokenize(p->pReal, pCtx, flags, pText, nText, 0, 0, xToken); +} + +/* +** xTokenizer method for a wrapper tokenizer that offers the v2 interface +** (with locale support). 
+*/ +static int fts5V2toV1Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native==0 ); + UNUSED_PARAM2(pLocale,nLocale); + return p->x1.xTokenize(p->pReal, pCtx, flags, pText, nText, xToken); +} + +/* +** Register a new tokenizer. This is the implementation of the +** fts5_api.xCreateTokenizer_v2() method. +*/ +static int fts5CreateTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer_v2 *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5Global *pGlobal = (Fts5Global*)pApi; + int rc = SQLITE_OK; + + if( pTokenizer->iVersion>2 ){ + rc = SQLITE_ERROR; }else{ - rc = SQLITE_NOMEM; + Fts5TokenizerModule *pNew = 0; + rc = fts5NewTokenizerModule(pGlobal, zName, pUserData, xDestroy, &pNew); + if( pNew ){ + pNew->x2 = *pTokenizer; + pNew->bV2Native = 1; + pNew->x1.xCreate = fts5VtoVCreate; + pNew->x1.xTokenize = fts5V1toV2Tokenize; + pNew->x1.xDelete = fts5VtoVDelete; + } } return rc; } +/* +** The fts5_api.xCreateTokenizer() method. +*/ +static int fts5CreateTokenizer( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5TokenizerModule *pNew = 0; + int rc = SQLITE_OK; + + rc = fts5NewTokenizerModule( + (Fts5Global*)pApi, zName, pUserData, xDestroy, &pNew + ); + if( pNew ){ + pNew->x1 = *pTokenizer; + pNew->x2.xCreate = fts5VtoVCreate; + pNew->x2.xTokenize = fts5V2toV1Tokenize; + pNew->x2.xDelete = fts5VtoVDelete; + } + return rc; +} + +/* +** Search the global context passed as the first argument for a tokenizer +** module named zName. If found, return a pointer to the Fts5TokenizerModule +** object. Otherwise, return NULL. +*/ static Fts5TokenizerModule *fts5LocateTokenizer( - Fts5Global *pGlobal, - const char *zName + Fts5Global *pGlobal, /* Global (one per db handle) object */ + const char *zName /* Name of tokenizer module to find */ ){ Fts5TokenizerModule *pMod = 0; @@ -252431,6 +255714,36 @@ static Fts5TokenizerModule *fts5LocateTokenizer( return pMod; } +/* +** Find a tokenizer. This is the implementation of the +** fts5_api.xFindTokenizer_v2() method. +*/ +static int fts5FindTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of tokenizer */ + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer /* Populate this object */ +){ + int rc = SQLITE_OK; + Fts5TokenizerModule *pMod; + + pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); + if( pMod ){ + if( pMod->bV2Native ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *ppTokenizer = &pMod->x2; + }else{ + *ppTokenizer = 0; + *ppUserData = 0; + rc = SQLITE_ERROR; + } + + return rc; +} + /* ** Find a tokenizer. This is the implementation of the ** fts5_api.xFindTokenizer() method. 
@@ -252446,55 +255759,75 @@ static int fts5FindTokenizer( pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); if( pMod ){ - *pTokenizer = pMod->x; - *ppUserData = pMod->pUserData; + if( pMod->bV2Native==0 ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *pTokenizer = pMod->x1; }else{ - memset(pTokenizer, 0, sizeof(fts5_tokenizer)); + memset(pTokenizer, 0, sizeof(*pTokenizer)); + *ppUserData = 0; rc = SQLITE_ERROR; } return rc; } -static int sqlite3Fts5GetTokenizer( - Fts5Global *pGlobal, - const char **azArg, - int nArg, - Fts5Config *pConfig, - char **pzErr -){ - Fts5TokenizerModule *pMod; +/* +** Attempt to instantiate the tokenizer. +*/ +static int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig){ + const char **azArg = pConfig->t.azArg; + const int nArg = pConfig->t.nArg; + Fts5TokenizerModule *pMod = 0; int rc = SQLITE_OK; - pMod = fts5LocateTokenizer(pGlobal, nArg==0 ? 0 : azArg[0]); + pMod = fts5LocateTokenizer(pConfig->pGlobal, nArg==0 ? 0 : azArg[0]); if( pMod==0 ){ assert( nArg>0 ); rc = SQLITE_ERROR; - if( pzErr ) *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); + sqlite3Fts5ConfigErrmsg(pConfig, "no such tokenizer: %s", azArg[0]); }else{ - rc = pMod->x.xCreate( - pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok + int (*xCreate)(void*, const char**, int, Fts5Tokenizer**) = 0; + if( pMod->bV2Native ){ + xCreate = pMod->x2.xCreate; + pConfig->t.pApi2 = &pMod->x2; + }else{ + pConfig->t.pApi1 = &pMod->x1; + xCreate = pMod->x1.xCreate; + } + + rc = xCreate(pMod->pUserData, + (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->t.pTok ); - pConfig->pTokApi = &pMod->x; + if( rc!=SQLITE_OK ){ - if( pzErr && rc!=SQLITE_NOMEM ){ - *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + if( rc!=SQLITE_NOMEM ){ + sqlite3Fts5ConfigErrmsg(pConfig, "error in tokenizer constructor"); } - }else{ - pConfig->ePattern = sqlite3Fts5TokenizerPattern( - pMod->x.xCreate, pConfig->pTok + }else if( pMod->bV2Native==0 ){ + pConfig->t.ePattern = sqlite3Fts5TokenizerPattern( + pMod->x1.xCreate, pConfig->t.pTok ); } } if( rc!=SQLITE_OK ){ - pConfig->pTokApi = 0; - pConfig->pTok = 0; + pConfig->t.pApi1 = 0; + pConfig->t.pApi2 = 0; + pConfig->t.pTok = 0; } return rc; } + +/* +** xDestroy callback passed to sqlite3_create_module(). This is invoked +** when the db handle is being closed. Free memory associated with +** tokenizers and aux functions registered with this db handle. +*/ static void fts5ModuleDestroy(void *pCtx){ Fts5TokenizerModule *pTok, *pNextTok; Fts5Auxiliary *pAux, *pNextAux; @@ -252515,6 +255848,10 @@ static void fts5ModuleDestroy(void *pCtx){ sqlite3_free(pGlobal); } +/* +** Implementation of the fts5() function used by clients to obtain the +** API pointer. +*/ static void fts5Fts5Func( sqlite3_context *pCtx, /* Function call context */ int nArg, /* Number of args */ @@ -252538,7 +255875,82 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70", -1, SQLITE_TRANSIENT); +} + +/* +** Implementation of fts5_locale(LOCALE, TEXT) function. +** +** If parameter LOCALE is NULL, or a zero-length string, then a copy of +** TEXT is returned. 
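+** (As an illustrative sketch - assuming an fts5 table "ft" created with
+** the locale=1 option - an application might invoke:
+**
+**     INSERT INTO ft(x) VALUES( fts5_locale('en_US', 'text value') );
+**
+** to associate locale "en_US" with the inserted text.)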
Otherwise, both LOCALE and TEXT are interpreted as +** text, and the value returned is a blob consisting of: +** +** * The 4 bytes 0x00, 0xE0, 0xB2, 0xEb (FTS5_LOCALE_HEADER). +** * The LOCALE, as utf-8 text, followed by +** * 0x00, followed by +** * The TEXT, as utf-8 text. +** +** There is no final nul-terminator following the TEXT value. +*/ +static void fts5LocaleFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + const char *zLocale = 0; + int nLocale = 0; + const char *zText = 0; + int nText = 0; + + assert( nArg==2 ); + UNUSED_PARAM(nArg); + + zLocale = (const char*)sqlite3_value_text(apArg[0]); + nLocale = sqlite3_value_bytes(apArg[0]); + + zText = (const char*)sqlite3_value_text(apArg[1]); + nText = sqlite3_value_bytes(apArg[1]); + + if( zLocale==0 || zLocale[0]=='\0' ){ + sqlite3_result_text(pCtx, zText, nText, SQLITE_TRANSIENT); + }else{ + Fts5Global *p = (Fts5Global*)sqlite3_user_data(pCtx); + u8 *pBlob = 0; + u8 *pCsr = 0; + int nBlob = 0; + + nBlob = FTS5_LOCALE_HDR_SIZE + nLocale + 1 + nText; + pBlob = (u8*)sqlite3_malloc(nBlob); + if( pBlob==0 ){ + sqlite3_result_error_nomem(pCtx); + return; + } + + pCsr = pBlob; + memcpy(pCsr, (const u8*)p->aLocaleHdr, FTS5_LOCALE_HDR_SIZE); + pCsr += FTS5_LOCALE_HDR_SIZE; + memcpy(pCsr, zLocale, nLocale); + pCsr += nLocale; + (*pCsr++) = 0x00; + if( zText ) memcpy(pCsr, zText, nText); + assert( &pCsr[nText]==&pBlob[nBlob] ); + + sqlite3_result_blob(pCtx, pBlob, nBlob, sqlite3_free); + } +} + +/* +** Implementation of fts5_insttoken() function. +*/ +static void fts5InsttokenFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + assert( nArg==1 ); + (void)nArg; + sqlite3_result_value(pCtx, apArg[0]); + sqlite3_result_subtype(pCtx, FTS5_INSTTOKEN_SUBTYPE); } /* @@ -252633,10 +256045,22 @@ static int fts5Init(sqlite3 *db){ void *p = (void*)pGlobal; memset(pGlobal, 0, sizeof(Fts5Global)); pGlobal->db = db; - pGlobal->api.iVersion = 2; + pGlobal->api.iVersion = 3; pGlobal->api.xCreateFunction = fts5CreateAux; pGlobal->api.xCreateTokenizer = fts5CreateTokenizer; pGlobal->api.xFindTokenizer = fts5FindTokenizer; + pGlobal->api.xCreateTokenizer_v2 = fts5CreateTokenizer_v2; + pGlobal->api.xFindTokenizer_v2 = fts5FindTokenizer_v2; + + /* Initialize pGlobal->aLocaleHdr[] to a 128-bit pseudo-random vector. + ** The constants below were generated randomly. 
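+  ** (Presumably, randomizing the header for each database handle makes it
+  ** vanishingly unlikely that unrelated blob data is ever misidentified
+  ** as a locale header.)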
*/ + sqlite3_randomness(sizeof(pGlobal->aLocaleHdr), pGlobal->aLocaleHdr); + pGlobal->aLocaleHdr[0] ^= 0xF924976D; + pGlobal->aLocaleHdr[1] ^= 0x16596E13; + pGlobal->aLocaleHdr[2] ^= 0x7C80BEAA; + pGlobal->aLocaleHdr[3] ^= 0x9B03A67F; + assert( sizeof(pGlobal->aLocaleHdr)==16 ); + rc = sqlite3_create_module_v2(db, "fts5", &fts5Mod, p, fts5ModuleDestroy); if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexInit(db); if( rc==SQLITE_OK ) rc = sqlite3Fts5ExprInit(pGlobal, db); @@ -252655,6 +256079,20 @@ static int fts5Init(sqlite3 *db){ p, fts5SourceIdFunc, 0, 0 ); } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_locale", 2, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE|SQLITE_SUBTYPE, + p, fts5LocaleFunc, 0, 0 + ); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_insttoken", 1, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE, + p, fts5InsttokenFunc, 0, 0 + ); + } } /* If SQLITE_FTS5_ENABLE_TEST_MI is defined, assume that the file @@ -252729,13 +256167,40 @@ SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3 *db){ /* #include "fts5Int.h" */ +/* +** pSavedRow: +** SQL statement FTS5_STMT_LOOKUP2 is a copy of FTS5_STMT_LOOKUP, it +** does a by-rowid lookup to retrieve a single row from the %_content +** table or equivalent external-content table/view. +** +** However, FTS5_STMT_LOOKUP2 is only used when retrieving the original +** values for a row being UPDATEd. In that case, the SQL statement is +** not reset and pSavedRow is set to point at it. This is so that the +** insert operation that follows the delete may access the original +** row values for any new values for which sqlite3_value_nochange() returns +** true. i.e. if the user executes: +** +** CREATE VIRTUAL TABLE ft USING fts5(a, b, c, locale=1); +** ... +** UPDATE fts SET a=?, b=? WHERE rowid=?; +** +** then the value passed to the xUpdate() method of this table as the +** new.c value is an sqlite3_value_nochange() value. So in this case it +** must be read from the saved row stored in Fts5Storage.pSavedRow. +** +** This is necessary - using sqlite3_value_nochange() instead of just having +** SQLite pass the original value back via xUpdate() - so as not to discard +** any locale information associated with such values. +** +*/ struct Fts5Storage { Fts5Config *pConfig; Fts5Index *pIndex; int bTotalsValid; /* True if nTotalRow/aTotalSize[] are valid */ i64 nTotalRow; /* Total number of rows in FTS table */ i64 *aTotalSize; /* Total sizes of each column */ - sqlite3_stmt *aStmt[11]; + sqlite3_stmt *pSavedRow; + sqlite3_stmt *aStmt[12]; }; @@ -252749,14 +256214,15 @@ struct Fts5Storage { # error "FTS5_STMT_LOOKUP mismatch" #endif -#define FTS5_STMT_INSERT_CONTENT 3 -#define FTS5_STMT_REPLACE_CONTENT 4 -#define FTS5_STMT_DELETE_CONTENT 5 -#define FTS5_STMT_REPLACE_DOCSIZE 6 -#define FTS5_STMT_DELETE_DOCSIZE 7 -#define FTS5_STMT_LOOKUP_DOCSIZE 8 -#define FTS5_STMT_REPLACE_CONFIG 9 -#define FTS5_STMT_SCAN 10 +#define FTS5_STMT_LOOKUP2 3 +#define FTS5_STMT_INSERT_CONTENT 4 +#define FTS5_STMT_REPLACE_CONTENT 5 +#define FTS5_STMT_DELETE_CONTENT 6 +#define FTS5_STMT_REPLACE_DOCSIZE 7 +#define FTS5_STMT_DELETE_DOCSIZE 8 +#define FTS5_STMT_LOOKUP_DOCSIZE 9 +#define FTS5_STMT_REPLACE_CONFIG 10 +#define FTS5_STMT_SCAN 11 /* ** Prepare the two insert statements - Fts5Storage.pInsertContent and @@ -252786,6 +256252,7 @@ static int fts5StorageGetStmt( "SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC", "SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC", "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP */ + "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP2 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */ "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */ @@ -252801,6 +256268,8 @@ static int fts5StorageGetStmt( Fts5Config *pC = p->pConfig; char *zSql = 0; + assert( ArraySize(azStmt)==ArraySize(p->aStmt) ); + switch( eStmt ){ case FTS5_STMT_SCAN: zSql = sqlite3_mprintf(azStmt[eStmt], @@ -252817,6 +256286,7 @@ static int fts5StorageGetStmt( break; case FTS5_STMT_LOOKUP: + case FTS5_STMT_LOOKUP2: zSql = sqlite3_mprintf(azStmt[eStmt], pC->zContentExprlist, pC->zContent, pC->zContentRowid ); @@ -252824,20 +256294,35 @@ static int fts5StorageGetStmt( case FTS5_STMT_INSERT_CONTENT: case FTS5_STMT_REPLACE_CONTENT: { - int nCol = pC->nCol + 1; - char *zBind; + char *zBind = 0; int i; - zBind = sqlite3_malloc64(1 + nCol*2); - if( zBind ){ - for(i=0; ieContent==FTS5_CONTENT_NORMAL + || pC->eContent==FTS5_CONTENT_UNINDEXED + ); + + /* Add bindings for the "c*" columns - those that store the actual + ** table content. If eContent==NORMAL, then there is one binding + ** for each column. Or, if eContent==UNINDEXED, then there are only + ** bindings for the UNINDEXED columns. */ + for(i=0; rc==SQLITE_OK && i<(pC->nCol+1); i++){ + if( !i || pC->eContent==FTS5_CONTENT_NORMAL || pC->abUnindexed[i-1] ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z%s?%d", zBind, zBind?",":"",i+1); } - zBind[i*2-1] = '\0'; - zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, zBind); - sqlite3_free(zBind); } + + /* Add bindings for any "l*" columns. Only non-UNINDEXED columns + ** require these. */ + if( pC->bLocale && pC->eContent==FTS5_CONTENT_NORMAL ){ + for(i=0; rc==SQLITE_OK && inCol; i++){ + if( pC->abUnindexed[i]==0 ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z,?%d", zBind, pC->nCol+i+2); + } + } + } + + zSql = sqlite3Fts5Mprintf(&rc, azStmt[eStmt], pC->zDb, pC->zName,zBind); + sqlite3_free(zBind); break; } @@ -252863,7 +256348,7 @@ static int fts5StorageGetStmt( rc = SQLITE_NOMEM; }else{ int f = SQLITE_PREPARE_PERSISTENT; - if( eStmt>FTS5_STMT_LOOKUP ) f |= SQLITE_PREPARE_NO_VTAB; + if( eStmt>FTS5_STMT_LOOKUP2 ) f |= SQLITE_PREPARE_NO_VTAB; p->pConfig->bLock++; rc = sqlite3_prepare_v3(pC->db, zSql, -1, f, &p->aStmt[eStmt], 0); p->pConfig->bLock--; @@ -252871,6 +256356,11 @@ static int fts5StorageGetStmt( if( rc!=SQLITE_OK && pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pC->db)); } + if( rc==SQLITE_ERROR && eStmt>FTS5_STMT_LOOKUP2 && eStmtpIndex = pIndex; if( bCreate ){ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ int nDefn = 32 + pConfig->nCol*10; - char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 10); + char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 20); if( zDefn==0 ){ rc = SQLITE_NOMEM; }else{ @@ -253034,8 +256526,20 @@ static int sqlite3Fts5StorageOpen( sqlite3_snprintf(nDefn, zDefn, "id INTEGER PRIMARY KEY"); iOff = (int)strlen(zDefn); for(i=0; inCol; i++){ - sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); - iOff += (int)strlen(&zDefn[iOff]); + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->abUnindexed[i] + ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); + iOff += (int)strlen(&zDefn[iOff]); + } + } + if( pConfig->bLocale ){ + for(i=0; inCol; i++){ + if( pConfig->abUnindexed[i]==0 ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", l%d", i); + 
iOff += (int)strlen(&zDefn[iOff]); + } + } } rc = sqlite3Fts5CreateTable(pConfig, "content", zDefn, 0, pzErr); } @@ -253112,15 +256616,49 @@ static int fts5StorageInsertCallback( return sqlite3Fts5IndexWrite(pIdx, pCtx->iCol, pCtx->szCol-1, pToken, nToken); } +/* +** This function is used as part of an UPDATE statement that modifies the +** rowid of a row. In that case, this function is called first to set +** Fts5Storage.pSavedRow to point to a statement that may be used to +** access the original values of the row being deleted - iDel. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +** It is not considered an error if row iDel does not exist. In this case +** pSavedRow is not set and SQLITE_OK returned. +*/ +static int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel){ + int rc = SQLITE_OK; + sqlite3_stmt *pSeek = 0; + + assert( p->pSavedRow==0 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+1, &pSeek, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + rc = sqlite3_reset(pSeek); + }else{ + p->pSavedRow = pSeek; + } + } + + return rc; +} + /* ** If a row with rowid iDel is present in the %_content table, add the ** delete-markers to the FTS index necessary to delete it. Do not actually ** remove the %_content row at this time though. +** +** If parameter bSaveRow is true, then Fts5Storage.pSavedRow is left +** pointing to a statement (FTS5_STMT_LOOKUP2) that may be used to access +** the original values of the row being deleted. This is used by UPDATE +** statements. */ static int fts5StorageDeleteFromIndex( Fts5Storage *p, i64 iDel, - sqlite3_value **apVal + sqlite3_value **apVal, + int bSaveRow /* True to set pSavedRow */ ){ Fts5Config *pConfig = p->pConfig; sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */ @@ -253129,12 +256667,21 @@ static int fts5StorageDeleteFromIndex( int iCol; Fts5InsertCtx ctx; + assert( bSaveRow==0 || apVal==0 ); + assert( bSaveRow==0 || bSaveRow==1 ); + assert( FTS5_STMT_LOOKUP2==FTS5_STMT_LOOKUP+1 ); + if( apVal==0 ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP, &pSeek, 0); - if( rc!=SQLITE_OK ) return rc; - sqlite3_bind_int64(pSeek, 1, iDel); - if( sqlite3_step(pSeek)!=SQLITE_ROW ){ - return sqlite3_reset(pSeek); + if( p->pSavedRow && bSaveRow ){ + pSeek = p->pSavedRow; + p->pSavedRow = 0; + }else{ + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+bSaveRow, &pSeek, 0); + if( rc!=SQLITE_OK ) return rc; + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + return sqlite3_reset(pSeek); + } } } @@ -253142,26 +256689,42 @@ static int fts5StorageDeleteFromIndex( ctx.iCol = -1; for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ - const char *zText; - int nText; + sqlite3_value *pVal = 0; + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + assert( pSeek==0 || apVal==0 ); assert( pSeek!=0 || apVal!=0 ); if( pSeek ){ - zText = (const char*)sqlite3_column_text(pSeek, iCol); - nText = sqlite3_column_bytes(pSeek, iCol); - }else if( ALWAYS(apVal) ){ - zText = (const char*)sqlite3_value_text(apVal[iCol-1]); - nText = sqlite3_value_bytes(apVal[iCol-1]); + pVal = sqlite3_column_value(pSeek, iCol); }else{ - continue; + pVal = apVal[iCol-1]; } - ctx.szCol = 0; - rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, - zText, nText, (void*)&ctx, fts5StorageInsertCallback - ); - p->aTotalSize[iCol-1] -= (i64)ctx.szCol; - if( p->aTotalSize[iCol-1]<0 ){ - rc = 
FTS5_CORRUPT; + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pSeek ){ + pLoc = (const char*)sqlite3_column_text(pSeek, iCol + pConfig->nCol); + nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + } + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + ctx.szCol = 0; + rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, + pText, nText, (void*)&ctx, fts5StorageInsertCallback + ); + p->aTotalSize[iCol-1] -= (i64)ctx.szCol; + if( rc==SQLITE_OK && p->aTotalSize[iCol-1]<0 ){ + rc = FTS5_CORRUPT; + } + sqlite3Fts5ClearLocale(pConfig); } } } @@ -253171,11 +256734,29 @@ static int fts5StorageDeleteFromIndex( p->nTotalRow--; } - rc2 = sqlite3_reset(pSeek); - if( rc==SQLITE_OK ) rc = rc2; + if( rc==SQLITE_OK && bSaveRow ){ + assert( p->pSavedRow==0 ); + p->pSavedRow = pSeek; + }else{ + rc2 = sqlite3_reset(pSeek); + if( rc==SQLITE_OK ) rc = rc2; + } return rc; } +/* +** Reset any saved statement pSavedRow. Zero pSavedRow as well. This +** should be called by the xUpdate() method of the fts5 table before +** returning from any operation that may have set Fts5Storage.pSavedRow. +*/ +static void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage *pStorage){ + assert( pStorage->pSavedRow==0 + || pStorage->pSavedRow==pStorage->aStmt[FTS5_STMT_LOOKUP2] + ); + sqlite3_reset(pStorage->pSavedRow); + pStorage->pSavedRow = 0; +} + /* ** This function is called to process a DELETE on a contentless_delete=1 ** table. It adds the tombstone required to delete the entry with rowid @@ -253188,7 +256769,9 @@ static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){ int rc = SQLITE_OK; assert( p->pConfig->bContentlessDelete ); - assert( p->pConfig->eContent==FTS5_CONTENT_NONE ); + assert( p->pConfig->eContent==FTS5_CONTENT_NONE + || p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ); /* Look up the origin of the document in the %_docsize table. Store ** this in stack variable iOrigin. */ @@ -253232,12 +256815,12 @@ static int fts5StorageInsertDocsize( rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin); sqlite3_bind_int64(pReplace, 3, iOrigin); } - if( rc==SQLITE_OK ){ - sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); - sqlite3_step(pReplace); - rc = sqlite3_reset(pReplace); - sqlite3_bind_null(pReplace, 2); - } + } + if( rc==SQLITE_OK ){ + sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); + sqlite3_step(pReplace); + rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 2); } } return rc; @@ -253291,7 +256874,12 @@ static int fts5StorageSaveTotals(Fts5Storage *p){ /* ** Remove a row from the FTS table. 
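+** If parameter bSaveRow is true, the original values of the row being
+** deleted may be left in Fts5Storage.pSavedRow for use by the insert that
+** follows as part of an UPDATE. In that case the caller is expected to
+** invoke sqlite3Fts5StorageReleaseDeleteRow() before the current operation
+** returns.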
*/ -static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **apVal){ +static int sqlite3Fts5StorageDelete( + Fts5Storage *p, /* Storage object */ + i64 iDel, /* Rowid to delete from table */ + sqlite3_value **apVal, /* Optional - values to remove from index */ + int bSaveRow /* If true, set pSavedRow for deleted row */ +){ Fts5Config *pConfig = p->pConfig; int rc; sqlite3_stmt *pDel = 0; @@ -253307,8 +256895,14 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap if( rc==SQLITE_OK ){ if( p->pConfig->bContentlessDelete ){ rc = fts5StorageContentlessDelete(p, iDel); + if( rc==SQLITE_OK + && bSaveRow + && p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ + rc = sqlite3Fts5StorageFindDeleteRow(p, iDel); + } }else{ - rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + rc = fts5StorageDeleteFromIndex(p, iDel, apVal, bSaveRow); } } @@ -253323,7 +256917,9 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap } /* Delete the %_content record */ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ if( rc==SQLITE_OK ){ rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_CONTENT, &pDel, 0); } @@ -253355,8 +256951,13 @@ static int sqlite3Fts5StorageDeleteAll(Fts5Storage *p){ ); if( rc==SQLITE_OK && pConfig->bColumnsize ){ rc = fts5ExecPrintf(pConfig->db, 0, - "DELETE FROM %Q.'%q_docsize';", - pConfig->zDb, pConfig->zName + "DELETE FROM %Q.'%q_docsize';", pConfig->zDb, pConfig->zName + ); + } + + if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_UNINDEXED ){ + rc = fts5ExecPrintf(pConfig->db, 0, + "DELETE FROM %Q.'%q_content';", pConfig->zDb, pConfig->zName ); } @@ -253397,14 +256998,36 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){ for(ctx.iCol=0; rc==SQLITE_OK && ctx.iColnCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_column_text(pScan, ctx.iCol+1); - int nText = sqlite3_column_bytes(pScan, ctx.iCol+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pLoc in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = sqlite3_column_value(pScan, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } } sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -253470,6 +257093,7 @@ static int fts5StorageNewRowid(Fts5Storage *p, i64 *piRowid){ */ static int sqlite3Fts5StorageContentInsert( Fts5Storage *p, + int bReplace, /* True to use REPLACE instead of INSERT */ sqlite3_value **apVal, i64 *piRowid ){ @@ -253477,7 +257101,9 @@ static 
int sqlite3Fts5StorageContentInsert( int rc = SQLITE_OK; /* Insert the new row into the %_content table. */ - if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent!=FTS5_CONTENT_NORMAL + && pConfig->eContent!=FTS5_CONTENT_UNINDEXED + ){ if( sqlite3_value_type(apVal[1])==SQLITE_INTEGER ){ *piRowid = sqlite3_value_int64(apVal[1]); }else{ @@ -253486,9 +257112,52 @@ static int sqlite3Fts5StorageContentInsert( }else{ sqlite3_stmt *pInsert = 0; /* Statement to write %_content table */ int i; /* Counter variable */ - rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT, &pInsert, 0); - for(i=1; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ - rc = sqlite3_bind_value(pInsert, i, apVal[i]); + + assert( FTS5_STMT_INSERT_CONTENT+1==FTS5_STMT_REPLACE_CONTENT ); + assert( bReplace==0 || bReplace==1 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT+bReplace, &pInsert, 0); + if( pInsert ) sqlite3_clear_bindings(pInsert); + + /* Bind the rowid value */ + sqlite3_bind_value(pInsert, 1, apVal[1]); + + /* Loop through values for user-defined columns. i=2 is the leftmost + ** user-defined column. As is column 1 of pSavedRow. */ + for(i=2; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ + int bUnindexed = pConfig->abUnindexed[i-2]; + if( pConfig->eContent==FTS5_CONTENT_NORMAL || bUnindexed ){ + sqlite3_value *pVal = apVal[i]; + + if( sqlite3_value_nochange(pVal) && p->pSavedRow ){ + /* This is an UPDATE statement, and user-defined column (i-2) was not + ** modified. Retrieve the value from Fts5Storage.pSavedRow. */ + pVal = sqlite3_column_value(p->pSavedRow, i-1); + if( pConfig->bLocale && bUnindexed==0 ){ + sqlite3_bind_value(pInsert, pConfig->nCol + i, + sqlite3_column_value(p->pSavedRow, pConfig->nCol + i - 1) + ); + } + }else if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + const char *pLoc = 0; + int nText = 0; + int nLoc = 0; + assert( pConfig->bLocale ); + + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + if( rc==SQLITE_OK ){ + sqlite3_bind_text(pInsert, i, pText, nText, SQLITE_TRANSIENT); + if( bUnindexed==0 ){ + int iLoc = pConfig->nCol + i; + sqlite3_bind_text(pInsert, iLoc, pLoc, nLoc, SQLITE_TRANSIENT); + } + } + + continue; + } + + rc = sqlite3_bind_value(pInsert, i, pVal); + } } if( rc==SQLITE_OK ){ sqlite3_step(pInsert); @@ -253523,14 +257192,38 @@ static int sqlite3Fts5StorageIndexInsert( for(ctx.iCol=0; rc==SQLITE_OK && ctx.iColnCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_value_text(apVal[ctx.iCol+2]); - int nText = sqlite3_value_bytes(apVal[ctx.iCol+2]); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pText in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = apVal[ctx.iCol+2]; + if( p->pSavedRow && sqlite3_value_nochange(pVal) ){ + pVal = sqlite3_column_value(p->pSavedRow, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(p->pSavedRow, iCol); + nLoc = sqlite3_column_bytes(p->pSavedRow, iCol); + } + }else{ + pVal = apVal[ctx.iCol+2]; + } + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, 
&nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, pText, nText, (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } } sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -253694,29 +257387,61 @@ static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg){ rc = sqlite3Fts5TermsetNew(&ctx.pTermset); } for(i=0; rc==SQLITE_OK && inCol; i++){ - if( pConfig->abUnindexed[i] ) continue; - ctx.iCol = i; - ctx.szCol = 0; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - rc = sqlite3Fts5TermsetNew(&ctx.pTermset); - } - if( rc==SQLITE_OK ){ - const char *zText = (const char*)sqlite3_column_text(pScan, i+1); - int nText = sqlite3_column_bytes(pScan, i+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageIntegrityCallback - ); - } - if( rc==SQLITE_OK && pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){ - rc = FTS5_CORRUPT; - } - aTotalSize[i] += ctx.szCol; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - sqlite3Fts5TermsetFree(ctx.pTermset); - ctx.pTermset = 0; + if( pConfig->abUnindexed[i]==0 ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + sqlite3_value *pVal = sqlite3_column_value(pScan, i+1); + + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue( + pVal, &pText, &nText, &pLoc, &nLoc + ); + }else{ + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = i + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + ctx.iCol = i; + ctx.szCol = 0; + + if( rc==SQLITE_OK && pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + rc = sqlite3Fts5TermsetNew(&ctx.pTermset); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageIntegrityCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } + + /* If this is not a columnsize=0 database, check that the number + ** of tokens in the value matches the aColSize[] value read from + ** the %_docsize table. 
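+        ** (With columnsize=0 there is no %_docsize table, so no such
+        ** cross-check is possible.)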
*/ + if( rc==SQLITE_OK + && pConfig->bColumnsize + && ctx.szCol!=aColSize[i] + ){ + rc = FTS5_CORRUPT; + } + aTotalSize[i] += ctx.szCol; + if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + sqlite3Fts5TermsetFree(ctx.pTermset); + ctx.pTermset = 0; + } } } sqlite3Fts5TermsetFree(ctx.pTermset); @@ -254023,7 +257748,7 @@ static int fts5AsciiCreate( int i; memset(p, 0, sizeof(AsciiTokenizer)); memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar)); - for(i=0; rc==SQLITE_OK && i=0xc0 ){ \ c = sqlite3Utf8Trans1[c-0xc0]; \ - while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ + while( zInpTokenizer ){ - p->tokenizer.xDelete(p->pTokenizer); + p->tokenizer_v2.xDelete(p->pTokenizer); } sqlite3_free(p); } @@ -254531,6 +258253,7 @@ static int fts5PorterCreate( PorterTokenizer *pRet; void *pUserdata = 0; const char *zBase = "unicode61"; + fts5_tokenizer_v2 *pV2 = 0; if( nArg>0 ){ zBase = azArg[0]; @@ -254539,14 +258262,15 @@ static int fts5PorterCreate( pRet = (PorterTokenizer*)sqlite3_malloc(sizeof(PorterTokenizer)); if( pRet ){ memset(pRet, 0, sizeof(PorterTokenizer)); - rc = pApi->xFindTokenizer(pApi, zBase, &pUserdata, &pRet->tokenizer); + rc = pApi->xFindTokenizer_v2(pApi, zBase, &pUserdata, &pV2); }else{ rc = SQLITE_NOMEM; } if( rc==SQLITE_OK ){ int nArg2 = (nArg>0 ? nArg-1 : 0); - const char **azArg2 = (nArg2 ? &azArg[1] : 0); - rc = pRet->tokenizer.xCreate(pUserdata, azArg2, nArg2, &pRet->pTokenizer); + const char **az2 = (nArg2 ? &azArg[1] : 0); + memcpy(&pRet->tokenizer_v2, pV2, sizeof(fts5_tokenizer_v2)); + rc = pRet->tokenizer_v2.xCreate(pUserdata, az2, nArg2, &pRet->pTokenizer); } if( rc!=SQLITE_OK ){ @@ -255197,6 +258921,7 @@ static int fts5PorterTokenize( void *pCtx, int flags, const char *pText, int nText, + const char *pLoc, int nLoc, int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd) ){ PorterTokenizer *p = (PorterTokenizer*)pTokenizer; @@ -255204,8 +258929,8 @@ static int fts5PorterTokenize( sCtx.xToken = xToken; sCtx.pCtx = pCtx; sCtx.aBuf = p->aBuf; - return p->tokenizer.xTokenize( - p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb + return p->tokenizer_v2.xTokenize( + p->pTokenizer, (void*)&sCtx, flags, pText, nText, pLoc, nLoc, fts5PorterCb ); } @@ -255235,41 +258960,46 @@ static int fts5TriCreate( Fts5Tokenizer **ppOut ){ int rc = SQLITE_OK; - TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew)); + TrigramTokenizer *pNew = 0; UNUSED_PARAM(pUnused); - if( pNew==0 ){ - rc = SQLITE_NOMEM; + if( nArg%2 ){ + rc = SQLITE_ERROR; }else{ int i; - pNew->bFold = 1; - pNew->iFoldParam = 0; - for(i=0; rc==SQLITE_OK && ibFold = 1; + pNew->iFoldParam = 0; + + for(i=0; rc==SQLITE_OK && ibFold = (zArg[0]=='0'); + } + }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ + if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ + rc = SQLITE_ERROR; + }else{ + pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0; + } }else{ - pNew->bFold = (zArg[0]=='0'); - } - }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ - if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ rc = SQLITE_ERROR; - }else{ - pNew->iFoldParam = (zArg[0]!='0') ? 
2 : 0; } - }else{ - rc = SQLITE_ERROR; } - } - if( iiFoldParam!=0 && pNew->bFold==0 ){ - rc = SQLITE_ERROR; - } + if( pNew->iFoldParam!=0 && pNew->bFold==0 ){ + rc = SQLITE_ERROR; + } - if( rc!=SQLITE_OK ){ - fts5TriDelete((Fts5Tokenizer*)pNew); - pNew = 0; + if( rc!=SQLITE_OK ){ + fts5TriDelete((Fts5Tokenizer*)pNew); + pNew = 0; + } } } *ppOut = (Fts5Tokenizer*)pNew; @@ -255292,8 +259022,8 @@ static int fts5TriTokenize( char *zOut = aBuf; int ii; const unsigned char *zIn = (const unsigned char*)pText; - const unsigned char *zEof = &zIn[nText]; - u32 iCode; + const unsigned char *zEof = (zIn ? &zIn[nText] : 0); + u32 iCode = 0; int aStart[3]; /* Input offset of each character in aBuf[] */ UNUSED_PARAM(unusedFlags); @@ -255302,8 +259032,8 @@ static int fts5TriTokenize( for(ii=0; ii<3; ii++){ do { aStart[ii] = zIn - (const unsigned char*)pText; + if( zIn>=zEof ) return SQLITE_OK; READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) return SQLITE_OK; if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); }while( iCode==0 ); WRITE_UTF8(zOut, iCode); @@ -255324,8 +259054,11 @@ static int fts5TriTokenize( /* Read characters from the input up until the first non-diacritic */ do { iNext = zIn - (const unsigned char*)pText; + if( zIn>=zEof ){ + iCode = 0; + break; + } READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); }while( iCode==0 ); @@ -255374,6 +259107,16 @@ static int sqlite3Fts5TokenizerPattern( return FTS5_PATTERN_NONE; } +/* +** Return true if the tokenizer described by p->azArg[] is the trigram +** tokenizer. This tokenizer needs to be loaded before xBestIndex is +** called for the first time in order to correctly handle LIKE/GLOB. +*/ +static int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig *p){ + return (p->nArg>=1 && 0==sqlite3_stricmp(p->azArg[0], "trigram")); +} + + /* ** Register all built-in tokenizers with FTS5. */ @@ -255384,7 +259127,6 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){ } aBuiltin[] = { { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}}, { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }}, - { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }}, { "trigram", {fts5TriCreate, fts5TriDelete, fts5TriTokenize}}, }; @@ -255399,7 +259141,20 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){ 0 ); } - + if( rc==SQLITE_OK ){ + fts5_tokenizer_v2 sPorter = { + 2, + fts5PorterCreate, + fts5PorterDelete, + fts5PorterTokenize + }; + rc = pApi->xCreateTokenizer_v2(pApi, + "porter", + (void*)pApi, + &sPorter, + 0 + ); + } return rc; } @@ -255769,6 +259524,9 @@ static int sqlite3Fts5UnicodeCatParse(const char *zCat, u8 *aArray){ default: return 1; } break; + + default: + return 1; } return 0; } @@ -256593,6 +260351,7 @@ struct Fts5VocabCursor { int nLeTerm; /* Size of zLeTerm in bytes */ char *zLeTerm; /* (term <= $zLeTerm) paramater, or NULL */ + int colUsed; /* Copy of sqlite3_index_info.colUsed */ /* These are used by 'col' tables only */ int iCol; @@ -256619,9 +260378,11 @@ struct Fts5VocabCursor { /* ** Bits for the mask used as the idxNum value by xBestIndex/xFilter. 
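+** The low 8 bits of the idxNum carry a copy of sqlite3_index_info.colUsed;
+** the FTS5_VOCAB_TERM_* constraint flags occupy the byte above them.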
*/ -#define FTS5_VOCAB_TERM_EQ 0x01 -#define FTS5_VOCAB_TERM_GE 0x02 -#define FTS5_VOCAB_TERM_LE 0x04 +#define FTS5_VOCAB_TERM_EQ 0x0100 +#define FTS5_VOCAB_TERM_GE 0x0200 +#define FTS5_VOCAB_TERM_LE 0x0400 + +#define FTS5_VOCAB_COLUSED_MASK 0xFF /* @@ -256798,11 +260559,13 @@ static int fts5VocabBestIndexMethod( int iTermEq = -1; int iTermGe = -1; int iTermLe = -1; - int idxNum = 0; + int idxNum = (int)pInfo->colUsed; int nArg = 0; UNUSED_PARAM(pUnused); + assert( (pInfo->colUsed & FTS5_VOCAB_COLUSED_MASK)==pInfo->colUsed ); + for(i=0; inConstraint; i++){ struct sqlite3_index_constraint *p = &pInfo->aConstraint[i]; if( p->usable==0 ) continue; @@ -256894,7 +260657,7 @@ static int fts5VocabOpenMethod( if( rc==SQLITE_OK ){ pVTab->zErrMsg = sqlite3_mprintf( "no such fts5 table: %s.%s", pTab->zFts5Db, pTab->zFts5Tbl - ); + ); rc = SQLITE_ERROR; } }else{ @@ -257054,9 +260817,19 @@ static int fts5VocabNextMethod(sqlite3_vtab_cursor *pCursor){ switch( pTab->eType ){ case FTS5_VOCAB_ROW: - if( eDetail==FTS5_DETAIL_FULL ){ - while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){ - pCsr->aCnt[0]++; + /* Do not bother counting the number of instances if the "cnt" + ** column is not being read (according to colUsed). */ + if( eDetail==FTS5_DETAIL_FULL && (pCsr->colUsed & 0x04) ){ + while( iPosaCnt[] */ + pCsr->aCnt[0]++; + } } } pCsr->aDoc[0]++; @@ -257154,6 +260927,7 @@ static int fts5VocabFilterMethod( if( idxNum & FTS5_VOCAB_TERM_EQ ) pEq = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_GE ) pGe = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_LE ) pLe = apVal[iVal++]; + pCsr->colUsed = (idxNum & FTS5_VOCAB_COLUSED_MASK); if( pEq ){ zTerm = (const char *)sqlite3_value_text(pEq); @@ -257321,7 +261095,7 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){ } - +/* Here ends the fts5.c composite file. */ #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */ /************** End of fts5.c ************************************************/ @@ -257677,363 +261451,9 @@ SQLITE_API int sqlite3_stmt_init( /************** End of stmt.c ************************************************/ /* Return the source-id for this library */ SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } +#endif /* SQLITE_AMALGAMATION */ /************************** End of sqlite3.c ******************************/ #else // USE_LIBSQLITE3 // If users really want to link against the system sqlite3 we // need to make this file a noop. - #endif -/* -** 2014-09-08 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the bulk of the implementation of the -** user-authentication extension feature. Some parts of the user- -** authentication code are contained within the SQLite core (in the -** src/ subdirectory of the main source code tree) but those parts -** that could reasonable be separated out are moved into this file. -** -** To compile with the user-authentication feature, append this file to -** end of an SQLite amalgamation, then add the SQLITE_USER_AUTHENTICATION -** compile-time option. See the user-auth.txt file in the same source -** directory as this file for additional information. 
-*/ -#ifdef SQLITE_USER_AUTHENTICATION -#ifndef SQLITEINT_H -# include "sqliteInt.h" -#endif - -/* -** Prepare an SQL statement for use by the user authentication logic. -** Return a pointer to the prepared statement on success. Return a -** NULL pointer if there is an error of any kind. -*/ -static sqlite3_stmt *sqlite3UserAuthPrepare( - sqlite3 *db, - const char *zFormat, - ... -){ - sqlite3_stmt *pStmt; - char *zSql; - int rc; - va_list ap; - u64 savedFlags = db->flags; - - va_start(ap, zFormat); - zSql = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - if( zSql==0 ) return 0; - db->flags |= SQLITE_WriteSchema; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - db->flags = savedFlags; - sqlite3_free(zSql); - if( rc ){ - sqlite3_finalize(pStmt); - pStmt = 0; - } - return pStmt; -} - -/* -** Check to see if the sqlite_user table exists in database zDb. -*/ -static int userTableExists(sqlite3 *db, const char *zDb){ - int rc; - sqlite3_mutex_enter(db->mutex); - sqlite3BtreeEnterAll(db); - if( db->init.busy==0 ){ - char *zErr = 0; - sqlite3Init(db, &zErr); - sqlite3DbFree(db, zErr); - } - rc = sqlite3FindTable(db, "sqlite_user", zDb)!=0; - sqlite3BtreeLeaveAll(db); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Check to see if database zDb has a "sqlite_user" table and if it does -** whether that table can authenticate zUser with nPw,zPw. Write one of -** the UAUTH_* user authorization level codes into *peAuth and return a -** result code. -*/ -static int userAuthCheckLogin( - sqlite3 *db, /* The database connection to check */ - const char *zDb, /* Name of specific database to check */ - u8 *peAuth /* OUT: One of UAUTH_* constants */ -){ - sqlite3_stmt *pStmt; - int rc; - - *peAuth = UAUTH_Unknown; - if( !userTableExists(db, "main") ){ - *peAuth = UAUTH_Admin; /* No sqlite_user table. Everybody is admin. */ - return SQLITE_OK; - } - if( db->auth.zAuthUser==0 ){ - *peAuth = UAUTH_Fail; - return SQLITE_OK; - } - pStmt = sqlite3UserAuthPrepare(db, - "SELECT pw=sqlite_crypt(?1,pw), isAdmin FROM \"%w\".sqlite_user" - " WHERE uname=?2", zDb); - if( pStmt==0 ) return SQLITE_NOMEM; - sqlite3_bind_blob(pStmt, 1, db->auth.zAuthPW, db->auth.nAuthPW,SQLITE_STATIC); - sqlite3_bind_text(pStmt, 2, db->auth.zAuthUser, -1, SQLITE_STATIC); - rc = sqlite3_step(pStmt); - if( rc==SQLITE_ROW && sqlite3_column_int(pStmt,0) ){ - *peAuth = sqlite3_column_int(pStmt, 1) + UAUTH_User; - }else{ - *peAuth = UAUTH_Fail; - } - return sqlite3_finalize(pStmt); -} -int sqlite3UserAuthCheckLogin( - sqlite3 *db, /* The database connection to check */ - const char *zDb, /* Name of specific database to check */ - u8 *peAuth /* OUT: One of UAUTH_* constants */ -){ - int rc; - u8 savedAuthLevel; - assert( zDb!=0 ); - assert( peAuth!=0 ); - savedAuthLevel = db->auth.authLevel; - db->auth.authLevel = UAUTH_Admin; - rc = userAuthCheckLogin(db, zDb, peAuth); - db->auth.authLevel = savedAuthLevel; - return rc; -} - -/* -** If the current authLevel is UAUTH_Unknown, the take actions to figure -** out what authLevel should be -*/ -void sqlite3UserAuthInit(sqlite3 *db){ - if( db->auth.authLevel==UAUTH_Unknown ){ - u8 authLevel = UAUTH_Fail; - sqlite3UserAuthCheckLogin(db, "main", &authLevel); - db->auth.authLevel = authLevel; - if( authLevelflags &= ~SQLITE_WriteSchema; - } -} - -/* -** Implementation of the sqlite_crypt(X,Y) function. -** -** If Y is NULL then generate a new hash for password X and return that -** hash. 
If Y is not null, then generate a hash for password X using the -** same salt as the previous hash Y and return the new hash. -*/ -void sqlite3CryptFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - const char *zIn; - int nIn, ii; - u8 *zOut; - char zSalt[8]; - zIn = sqlite3_value_blob(argv[0]); - nIn = sqlite3_value_bytes(argv[0]); - if( sqlite3_value_type(argv[1])==SQLITE_BLOB - && sqlite3_value_bytes(argv[1])==nIn+sizeof(zSalt) - ){ - memcpy(zSalt, sqlite3_value_blob(argv[1]), sizeof(zSalt)); - }else{ - sqlite3_randomness(sizeof(zSalt), zSalt); - } - zOut = sqlite3_malloc( nIn+sizeof(zSalt) ); - if( zOut==0 ){ - sqlite3_result_error_nomem(context); - }else{ - memcpy(zOut, zSalt, sizeof(zSalt)); - for(ii=0; iiauth.authLevel = UAUTH_Unknown; - sqlite3_free(db->auth.zAuthUser); - sqlite3_free(db->auth.zAuthPW); - memset(&db->auth, 0, sizeof(db->auth)); - db->auth.zAuthUser = sqlite3_mprintf("%s", zUsername); - if( db->auth.zAuthUser==0 ) return SQLITE_NOMEM; - db->auth.zAuthPW = sqlite3_malloc( nPW+1 ); - if( db->auth.zAuthPW==0 ) return SQLITE_NOMEM; - memcpy(db->auth.zAuthPW,zPW,nPW); - db->auth.nAuthPW = nPW; - rc = sqlite3UserAuthCheckLogin(db, "main", &authLevel); - db->auth.authLevel = authLevel; - sqlite3ExpirePreparedStatements(db, 0); - if( rc ){ - return rc; /* OOM error, I/O error, etc. */ - } - if( authLevelauth.authLevelauth.zAuthUser==0 ){ - assert( isAdmin!=0 ); - sqlite3_user_authenticate(db, zUsername, aPW, nPW); - } - return SQLITE_OK; -} - -/* -** The sqlite3_user_change() interface can be used to change a users -** login credentials or admin privilege. Any user can change their own -** login credentials. Only an admin user can change another users login -** credentials or admin privilege setting. No user may change their own -** admin privilege setting. -*/ -int sqlite3_user_change( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to change */ - const char *aPW, /* Modified password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* Modified admin privilege for the user */ -){ - sqlite3_stmt *pStmt; - int rc; - u8 authLevel; - - authLevel = db->auth.authLevel; - if( authLevelauth.zAuthUser, zUsername)!=0 ){ - if( db->auth.authLevelauth.authLevel = UAUTH_Admin; - if( !userTableExists(db, "main") ){ - /* This routine is a no-op if the user to be modified does not exist */ - }else{ - pStmt = sqlite3UserAuthPrepare(db, - "UPDATE sqlite_user SET isAdmin=%d, pw=sqlite_crypt(?1,NULL)" - " WHERE uname=%Q", isAdmin, zUsername); - if( pStmt==0 ){ - rc = SQLITE_NOMEM; - }else{ - sqlite3_bind_blob(pStmt, 1, aPW, nPW, SQLITE_STATIC); - sqlite3_step(pStmt); - rc = sqlite3_finalize(pStmt); - } - } - db->auth.authLevel = authLevel; - return rc; -} - -/* -** The sqlite3_user_delete() interface can be used (by an admin user only) -** to delete a user. The currently logged-in user cannot be deleted, -** which guarantees that there is always an admin user and hence that -** the database cannot be converted into a no-authentication-required -** database. 
-*/ -int sqlite3_user_delete( - sqlite3 *db, /* Database connection */ - const char *zUsername /* Username to remove */ -){ - sqlite3_stmt *pStmt; - if( db->auth.authLevelauth.zAuthUser, zUsername)==0 ){ - /* Cannot delete self */ - return SQLITE_AUTH; - } - if( !userTableExists(db, "main") ){ - /* This routine is a no-op if the user to be deleted does not exist */ - return SQLITE_OK; - } - pStmt = sqlite3UserAuthPrepare(db, - "DELETE FROM sqlite_user WHERE uname=%Q", zUsername); - if( pStmt==0 ) return SQLITE_NOMEM; - sqlite3_step(pStmt); - return sqlite3_finalize(pStmt); -} - -#endif /* SQLITE_USER_AUTHENTICATION */ + #endif \ No newline at end of file diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index d67a4adb..5e07ce68 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.46.1" -#define SQLITE_VERSION_NUMBER 3046001 -#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" +#define SQLITE_VERSION "3.49.1" +#define SQLITE_VERSION_NUMBER 3049001 +#define SQLITE_SOURCE_ID "2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -653,6 +653,13 @@ SQLITE_API int sqlite3_exec( ** filesystem supports doing multiple write operations atomically when those ** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and ** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. +** +** The SQLITE_IOCAP_SUBPAGE_READ property means that it is ok to read +** from the database file in amounts that are not a multiple of the +** page size and that do not begin at a page boundary. Without this +** property, SQLite is careful to only do full-page reads and write +** on aligned pages, with the one exception that it will do a sub-page +** read of the first page to access the database header. */ #define SQLITE_IOCAP_ATOMIC 0x00000001 #define SQLITE_IOCAP_ATOMIC512 0x00000002 @@ -669,6 +676,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 #define SQLITE_IOCAP_IMMUTABLE 0x00002000 #define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000 +#define SQLITE_IOCAP_SUBPAGE_READ 0x00008000 /* ** CAPI3REF: File Locking Levels @@ -773,8 +781,8 @@ struct sqlite3_file { ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, -** PENDING, or EXCLUSIVE lock on the file. It returns true -** if such a lock exists and false otherwise. +** PENDING, or EXCLUSIVE lock on the file. It returns, via its output +** pointer parameter, true if such a lock exists and false otherwise. ** ** The xFileControl() method is a generic interface that allows custom ** VFS implementations to directly control an open file using the @@ -815,6 +823,7 @@ struct sqlite3_file { **
  • [SQLITE_IOCAP_POWERSAFE_OVERWRITE] **
  • [SQLITE_IOCAP_IMMUTABLE] **
  • [SQLITE_IOCAP_BATCH_ATOMIC] +**
  • [SQLITE_IOCAP_SUBPAGE_READ] ** ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of @@ -1092,6 +1101,11 @@ struct sqlite3_io_methods { ** pointed to by the pArg argument. This capability is used during testing ** and only needs to be supported when SQLITE_TEST is defined. ** +**
  • [[SQLITE_FCNTL_NULL_IO]] +** The [SQLITE_FCNTL_NULL_IO] opcode sets the low-level file descriptor +** or file handle for the [sqlite3_file] object such that it will no longer +** read or write to the database file. +** **
• [[SQLITE_FCNTL_WAL_BLOCK]]
 ** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might
 ** be advantageous to block on the next WAL lock if the lock is not immediately
@@ -1245,6 +1259,7 @@ struct sqlite3_io_methods {
 #define SQLITE_FCNTL_EXTERNAL_READER 40
 #define SQLITE_FCNTL_CKSM_FILE 41
 #define SQLITE_FCNTL_RESET_CACHE 42
+#define SQLITE_FCNTL_NULL_IO 43
 
 /* deprecated names */
 #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -2197,7 +2212,15 @@ struct sqlite3_mem_methods {
 ** CAPI3REF: Database Connection Configuration Options
 **
 ** These constants are the available integer configuration options that
-** can be passed as the second argument to the [sqlite3_db_config()] interface.
+** can be passed as the second parameter to the [sqlite3_db_config()] interface.
+**
+** The [sqlite3_db_config()] interface is a var-args function. It takes a
+** variable number of parameters, though always at least two. The number of
+** parameters passed into sqlite3_db_config() depends on which of these
+** constants is given as the second parameter. This documentation page
+** refers to parameters beyond the second as "arguments". Thus, when this
+** page says "the N-th argument" it means "the N-th parameter past the
+** configuration option" or "the (N+2)-th parameter to sqlite3_db_config()".
 **
 ** New configuration options may be added in future releases of SQLite.
 ** Existing configuration options might be discontinued. Applications
@@ -2209,8 +2232,14 @@ struct sqlite3_mem_methods {
 **
    ** [[SQLITE_DBCONFIG_LOOKASIDE]] **
    SQLITE_DBCONFIG_LOOKASIDE
    -**
    ^This option takes three additional arguments that determine the -** [lookaside memory allocator] configuration for the [database connection]. +**
    The SQLITE_DBCONFIG_LOOKASIDE option is used to adjust the +** configuration of the lookaside memory allocator within a database +** connection. +** The arguments to the SQLITE_DBCONFIG_LOOKASIDE option are not +** in the [DBCONFIG arguments|usual format]. +** The SQLITE_DBCONFIG_LOOKASIDE option takes three arguments, not two, +** so that a call to [sqlite3_db_config()] that uses SQLITE_DBCONFIG_LOOKASIDE +** should have a total of five parameters. ** ^The first argument (the third parameter to [sqlite3_db_config()] is a ** pointer to a memory buffer to use for lookaside memory. ** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb @@ -2233,7 +2262,8 @@ struct sqlite3_mem_methods { ** [[SQLITE_DBCONFIG_ENABLE_FKEY]] **
    SQLITE_DBCONFIG_ENABLE_FKEY
    **
    ^This option is used to enable or disable the enforcement of -** [foreign key constraints]. There should be two additional arguments. +** [foreign key constraints]. This is the same setting that is +** enabled or disabled by the [PRAGMA foreign_keys] statement. ** The first argument is an integer which is 0 to disable FK enforcement, ** positive to enable FK enforcement or negative to leave FK enforcement ** unchanged. The second parameter is a pointer to an integer into which @@ -2255,13 +2285,13 @@ struct sqlite3_mem_methods { **

    Originally this option disabled all triggers. ^(However, since ** SQLite version 3.35.0, TEMP triggers are still allowed even if ** this option is off. So, in other words, this option now only disables -** triggers in the main database schema or in the schemas of ATTACH-ed +** triggers in the main database schema or in the schemas of [ATTACH]-ed ** databases.)^

    ** ** [[SQLITE_DBCONFIG_ENABLE_VIEW]] **
    SQLITE_DBCONFIG_ENABLE_VIEW
    **
    ^This option is used to enable or disable [CREATE VIEW | views]. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable views, ** positive to enable views or negative to leave the setting unchanged. ** The second parameter is a pointer to an integer into which @@ -2280,7 +2310,7 @@ struct sqlite3_mem_methods { **
    ^This option is used to enable or disable the ** [fts3_tokenizer()] function which is part of the ** [FTS3] full-text search engine extension. -** There should be two additional arguments. +** There must be two additional arguments. ** The first argument is an integer which is 0 to disable fts3_tokenizer() or ** positive to enable fts3_tokenizer() or negative to leave the setting ** unchanged. @@ -2295,7 +2325,7 @@ struct sqlite3_mem_methods { ** interface independently of the [load_extension()] SQL function. ** The [sqlite3_enable_load_extension()] API enables or disables both the ** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. -** There should be two additional arguments. +** There must be two additional arguments. ** When the first argument to this interface is 1, then only the C-API is ** enabled and the SQL function remains disabled. If the first argument to ** this interface is 0, then both the C-API and the SQL function are disabled. @@ -2309,23 +2339,30 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_DBCONFIG_MAINDBNAME]]
    SQLITE_DBCONFIG_MAINDBNAME
    **
^This option is used to change the name of the "main" database -** schema. ^The sole argument is a pointer to a constant UTF8 string -** which will become the new schema name in place of "main". ^SQLite -** does not make a copy of the new main schema name string, so the application -** must ensure that the argument passed into this DBCONFIG option is unchanged -** until after the database connection closes. +** schema. This option does not follow the +** [DBCONFIG arguments|usual SQLITE_DBCONFIG argument format]. +** This option takes exactly one additional argument so that the +** [sqlite3_db_config()] call has a total of three parameters. The +** extra argument must be a pointer to a constant UTF8 string which +** will become the new schema name in place of "main". ^SQLite does +** not make a copy of the new main schema name string, so the application +** must ensure that the argument passed into SQLITE_DBCONFIG_MAINDBNAME +** is unchanged until after the database connection closes. **
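A minimal sketch (the schema name "app" and the helper name are invented); a string literal is used so the name outlives the call, as required above:

    /* Sketch: rename the "main" schema right after opening, so that
    ** later SQL can say "app.tbl" instead of "main.tbl".  A string
    ** literal has static lifetime, so it stays valid until the
    ** connection closes. */
    int open_with_schema_name(const char *zFile, sqlite3 **pDb){
      int rc = sqlite3_open(zFile, pDb);
      if( rc==SQLITE_OK ){
        rc = sqlite3_db_config(*pDb, SQLITE_DBCONFIG_MAINDBNAME, "app");
      }
      return rc;
    }
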
    ** ** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]] **
    SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
    -**
    Usually, when a database in wal mode is closed or detached from a -** database handle, SQLite checks if this will mean that there are now no -** connections at all to the database. If so, it performs a checkpoint -** operation before closing the connection. This option may be used to -** override this behavior. The first parameter passed to this operation -** is an integer - positive to disable checkpoints-on-close, or zero (the -** default) to enable them, and negative to leave the setting unchanged. -** The second parameter is a pointer to an integer +**
Usually, when a database in [WAL mode] is closed or detached from a +** database handle, SQLite checks if there are other connections to the +** same database, and if there are no other database connections (if the +** connection being closed is the last open connection to the database), +** then SQLite performs a [checkpoint] before closing the connection and +** deletes the WAL file. The SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE option can +** be used to override that behavior. The first argument passed to this +** operation (the third parameter to [sqlite3_db_config()]) is an integer +** which is positive to disable checkpoints-on-close, or zero (the default) +** to enable them, and negative to leave the setting unchanged. +** The second argument (the fourth parameter) is a pointer to an integer ** into which is written 0 or 1 to indicate whether checkpoints-on-close ** have been disabled - 0 if they are not disabled, 1 if they are. **
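A sketch highlighting the inverted sense of the first argument (positive disables checkpoint-on-close); the helper name is invented:

    /* Sketch: keep the WAL file in place when the last connection
    ** closes, e.g. so another process can snapshot it first. */
    int keep_wal_on_close(sqlite3 *db){
      int disabled = 0;
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE,
                                 1,           /* positive = DISABLE */
                                 &disabled);  /* expected to become 1 */
      return rc;
    }
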
    @@ -2486,7 +2523,7 @@ struct sqlite3_mem_methods { ** statistics. For statistics to be collected, the flag must be set on ** the database handle both when the SQL statement is prepared and when it ** is stepped. The flag is set (collection of statistics is enabled) -** by default. This option takes two arguments: an integer and a pointer to +** by default.

This option takes two arguments: an integer and a pointer to ** an integer. The first argument is 1, 0, or -1 to enable, disable, or ** leave unchanged the statement scanstatus option. If the second argument ** is not NULL, then the value of the statement scanstatus setting after @@ -2500,7 +2537,7 @@ struct sqlite3_mem_methods { ** in which tables and indexes are scanned so that the scans start at the end ** and work toward the beginning rather than starting at the beginning and ** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the -** same as setting [PRAGMA reverse_unordered_selects].

    This option takes ** two arguments which are an integer and a pointer to an integer. The first ** argument is 1, 0, or -1 to enable, disable, or leave unchanged the ** reverse scan order flag, respectively. If the second argument is not NULL, @@ -2509,7 +2546,76 @@ struct sqlite3_mem_methods { ** first argument. ** ** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE]] +**

    SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE option enables or disables +** the ability of the [ATTACH DATABASE] SQL command to create a new database +** file if the database file named in the ATTACH command does not already +** exist. This ability of ATTACH to create a new database is enabled by +** default. Applications can disable or reenable the ability for ATTACH to +** create new database files using this DBCONFIG option.

+** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the attach-create flag, respectively. If the second +** argument is not NULL, then 0 or 1 is written into the integer that the +** second argument points to depending on whether the attach-create flag is set +** after processing the first argument. +**
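A minimal sketch, assuming a library version that defines SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE; the helper name and file name are invented:

    /* Sketch: stop ATTACH from creating brand-new database files.
    ** After this call, e.g.  ATTACH 'no-such.db' AS aux;  should
    ** fail instead of silently creating an empty no-such.db. */
    int forbid_attach_create(sqlite3 *db){
      return sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE,
                               0,      /* 0 = disable */
                               NULL);  /* current value not needed */
    }
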

    +** +** [[SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE]] +**
    SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE
    +**
The SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE option enables or disables the +** ability of the [ATTACH DATABASE] SQL command to open a database for writing. +** This capability is enabled by default. Applications can disable or +** reenable this capability using this DBCONFIG option. If +** this capability is disabled, the [ATTACH] command will still work, +** but the database will be opened read-only. If this option is disabled, +** then the ability to create a new database using [ATTACH] is also disabled, +** regardless of the value of the [SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE] +** option.

    +** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to ATTACH another database for writing, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer to which the second argument points, depending on whether +** the ability to ATTACH a read/write database is enabled or disabled +** after processing the first argument. +**
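A sketch of reading the current setting without changing it, using the -1 convention of the [DBCONFIG arguments|usual format]; the helper name is invented:

    /* Sketch: returns 1 if ATTACH may open databases read/write,
    ** 0 if attached databases are forced read-only, -1 on error. */
    int attach_write_enabled(sqlite3 *db){
      int enabled = -1;
      if( sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE,
                            -1, &enabled)!=SQLITE_OK ){
        return -1;
      }
      return enabled;
    }
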

    +** +** [[SQLITE_DBCONFIG_ENABLE_COMMENTS]] +**
    SQLITE_DBCONFIG_ENABLE_COMMENTS
    +**
    The SQLITE_DBCONFIG_ENABLE_COMMENTS option enables or disables the +** ability to include comments in SQL text. Comments are enabled by default. +** An application can disable or reenable comments in SQL text using this +** DBCONFIG option.

+** This option takes two arguments which are an integer and a pointer +** to an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the ability to use comments in SQL text, +** respectively. If the second argument is not NULL, then 0 or 1 is written +** into the integer that the second argument points to depending on whether +** comments are allowed in SQL text after processing the first argument. +**
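A sketch of the observable effect; the exact error code for a rejected comment is not specified above, so only success/failure is checked, and the helper name is invented:

    /* Sketch: with comments disabled, SQL containing a comment is
    ** expected to fail to compile. */
    int comments_are_rejected(sqlite3 *db){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_COMMENTS, 0, NULL);
      if( rc!=SQLITE_OK ) return 0;
      rc = sqlite3_prepare_v2(db, "SELECT 1 /* hi */", -1, &pStmt, 0);
      sqlite3_finalize(pStmt);     /* harmless if pStmt is NULL */
      return rc!=SQLITE_OK;        /* expected: 1 */
    }
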

    +** **
    +** +** [[DBCONFIG arguments]]

    Arguments To SQLITE_DBCONFIG Options

    +** +**

Most of the SQLITE_DBCONFIG options take two arguments, so that the +** overall call to [sqlite3_db_config()] has a total of four parameters. +** The first argument (the third parameter to sqlite3_db_config()) is an integer. +** The second argument is a pointer to an integer. If the first argument is 1, +** then the option becomes enabled. If the first integer argument is 0, then the +** option is disabled. If the first argument is -1, then the option setting +** is unchanged. The second argument, the pointer to an integer, may be NULL. +** If the second argument is not NULL, then a value of 0 or 1 is written into +** the integer to which the second argument points, depending on whether the +** setting is disabled or enabled after applying any changes specified by +** the first argument. +** +**
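Because -1 leaves the setting untouched, this format admits a tiny generic getter; a sketch (helper name invented, valid only for options that follow the format above):

    /* Sketch: read the current 0/1 value of any standard-format
    ** option by passing -1 so nothing is modified. */
    static int dbconfig_get(sqlite3 *db, int op){
      int cur = -1;
      if( sqlite3_db_config(db, op, -1, &cur)!=SQLITE_OK ) return -1;
      return cur;  /* e.g. dbconfig_get(db, SQLITE_DBCONFIG_ENABLE_VIEW) */
    }
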

    While most SQLITE_DBCONFIG options use the argument format +** described in the previous paragraph, the [SQLITE_DBCONFIG_MAINDBNAME] +** and [SQLITE_DBCONFIG_LOOKASIDE] options are different. See the +** documentation of those exceptional options for details. */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ #define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ @@ -2531,7 +2637,10 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ #define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ #define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_ENABLE_ATTACH_CREATE 1020 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_ATTACH_WRITE 1021 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_COMMENTS 1022 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1022 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes @@ -2623,10 +2732,14 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); ** deleted by the most recently completed INSERT, UPDATE or DELETE ** statement on the database connection specified by the only parameter. ** The two functions are identical except for the type of the return value -** and that if the number of rows modified by the most recent INSERT, UPDATE +** and that if the number of rows modified by the most recent INSERT, UPDATE, ** or DELETE is greater than the maximum value supported by type "int", then ** the return value of sqlite3_changes() is undefined. ^Executing any other ** type of SQL statement does not modify the value returned by these functions. +** For the purposes of this interface, a CREATE TABLE AS SELECT statement +** does not count as an INSERT, UPDATE or DELETE statement and hence the rows +** added to the new table by the CREATE TABLE AS SELECT statement are not +** counted. ** ** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are ** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], @@ -3571,8 +3684,8 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** ** [[OPEN_EXRESCODE]] ^(

    [SQLITE_OPEN_EXRESCODE]
    **
    The database connection comes up in "extended result code mode". -** In other words, the database behaves has if -** [sqlite3_extended_result_codes(db,1)] where called on the database +** In other words, the database behaves as if +** [sqlite3_extended_result_codes(db,1)] were called on the database ** connection as soon as the connection is created. In addition to setting ** the extended result code mode, this flag also causes [sqlite3_open_v2()] ** to return an extended result code.
    @@ -4186,11 +4299,22 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); **
    The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler ** to return an error (error code SQLITE_ERROR) if the statement uses ** any virtual tables. +** +** [[SQLITE_PREPARE_DONT_LOG]]
    SQLITE_PREPARE_DONT_LOG
    +**
The SQLITE_PREPARE_DONT_LOG flag prevents SQL compiler +** errors from being sent to the error log defined by +** [SQLITE_CONFIG_LOG]. This can be used, for example, to do test +** compiles to see if some SQL syntax is well-formed, without generating +** messages on the global error log when it is not. If the test compile +** fails, the sqlite3_prepare_v3() call returns the same error indications +** with or without this flag; it just omits the call to [sqlite3_log()] that +** logs the error. ** */ #define SQLITE_PREPARE_PERSISTENT 0x01 #define SQLITE_PREPARE_NORMALIZE 0x02 #define SQLITE_PREPARE_NO_VTAB 0x04 +#define SQLITE_PREPARE_DONT_LOG 0x10 /* ** CAPI3REF: Compiling An SQL Statement @@ -4223,13 +4347,17 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** and sqlite3_prepare16_v3() use UTF-16. ** ** ^If the nByte argument is negative, then zSql is read up to the -** first zero terminator. ^If nByte is positive, then it is the -** number of bytes read from zSql. ^If nByte is zero, then no prepared +** first zero terminator. ^If nByte is positive, then it is the maximum +** number of bytes read from zSql. When nByte is positive, zSql is read +** up to the first zero terminator or until nByte bytes have been read, +** whichever comes first. ^If nByte is zero, then no prepared ** statement is generated. ** If the caller knows that the supplied string is nul-terminated, then ** there is a small performance advantage to passing an nByte parameter that ** is the number of bytes in the input string including ** the nul-terminator. +** Note that nByte measures the length of the input in bytes, not +** characters, even for the UTF-16 interfaces. ** ** ^If pzTail is not NULL then *pzTail is made to point to the first byte ** past the end of the first SQL statement in zSql. These routines only @@ -5600,7 +5728,7 @@ SQLITE_API int sqlite3_create_window_function( ** This flag instructs SQLite to omit some corner-case optimizations that ** might disrupt the operation of the [sqlite3_value_subtype()] function, ** causing it to return zero rather than the correct subtype(). -** SQL functions that invokes [sqlite3_value_subtype()] should have this +** All SQL functions that invoke [sqlite3_value_subtype()] should have this ** property. If the SQLITE_SUBTYPE property is omitted, then the return ** value from [sqlite3_value_subtype()] might sometimes be zero even though ** a non-zero subtype was specified by the function argument expression. @@ -5616,6 +5744,15 @@ SQLITE_API int sqlite3_create_window_function( ** [sqlite3_result_subtype()] should avoid setting this property, as the ** purpose of this property is to disable certain optimizations that are ** incompatible with subtypes.
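A sketch combining SQLITE_PREPARE_DONT_LOG with the nByte advice above; the helper name is invented, and <string.h> plus <sqlite3.h> are assumed:

    #include <string.h>

    /* Sketch: quiet syntax check.  Compile errors are reported to
    ** the caller as usual but are not forwarded to the
    ** SQLITE_CONFIG_LOG sink.  nByte includes the nul-terminator
    ** for the small speedup described above. */
    int sql_is_well_formed(sqlite3 *db, const char *zSql){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v3(db, zSql, (int)strlen(zSql)+1,
                                  SQLITE_PREPARE_DONT_LOG, &pStmt, 0);
      sqlite3_finalize(pStmt);
      return rc==SQLITE_OK;
    }

+** +** [[SQLITE_SELFORDER1]]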
    SQLITE_SELFORDER1
    +** The SQLITE_SELFORDER1 flag indicates that the function is an aggregate +** that internally orders the values provided to the first argument. The +** ordered-set aggregate SQL notation with a single ORDER BY term can be +** used to invoke this function. If the ordered-set aggregate notation is +** used on a function that lacks this flag, then an error is raised. Note +** that the ordered-set aggregate syntax is only available if SQLite is +** built using the -DSQLITE_ENABLE_ORDERED_SET_AGGREGATES compile-time option. **
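A hedged registration sketch: the aggregate name "median" and its callbacks are hypothetical, and a build compiled with the option above is assumed:

    /* Hypothetical ordered-set aggregate; callback bodies not shown. */
    void medianStep(sqlite3_context*, int, sqlite3_value**);
    void medianFinal(sqlite3_context*);

    /* Sketch: OR SQLITE_SELFORDER1 into the flags when registering,
    ** so the ordered-set notation described above may invoke median(). */
    int register_median(sqlite3 *db){
      return sqlite3_create_function(db, "median", 1,
                                     SQLITE_UTF8|SQLITE_SELFORDER1,
                                     0,             /* no app data  */
                                     0,             /* not a scalar */
                                     medianStep, medianFinal);
    }
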
    ** */ @@ -5624,6 +5761,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 #define SQLITE_RESULT_SUBTYPE 0x001000000 +#define SQLITE_SELFORDER1 0x002000000 /* ** CAPI3REF: Deprecated Functions @@ -5821,7 +5959,7 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. ** -** Every [application-defined SQL function] that invoke this interface +** Every [application-defined SQL function] that invokes this interface ** should include the [SQLITE_SUBTYPE] property in the text ** encoding argument when the function is [sqlite3_create_function|registered]. ** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() @@ -7428,9 +7566,11 @@ struct sqlite3_module { ** will be returned by the strategy. ** ** The xBestIndex method may optionally populate the idxFlags field with a -** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag - -** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite -** assumes that the strategy may visit at most one row. +** mask of SQLITE_INDEX_SCAN_* flags. One such flag is +** [SQLITE_INDEX_SCAN_HEX], which if set causes the [EXPLAIN QUERY PLAN] +** output to show the idxNum has hex instead of as decimal. Another flag is +** SQLITE_INDEX_SCAN_UNIQUE, which if set indicates that the query plan will +** return at most one row. ** ** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then ** SQLite also assumes that if a call to the xUpdate() method is made as @@ -7494,7 +7634,9 @@ struct sqlite3_index_info { ** [sqlite3_index_info].idxFlags field to some combination of ** these bits. */ -#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */ +#define SQLITE_INDEX_SCAN_UNIQUE 0x00000001 /* Scan visits at most 1 row */ +#define SQLITE_INDEX_SCAN_HEX 0x00000002 /* Display idxNum as hex */ + /* in EXPLAIN QUERY PLAN */ /* ** CAPI3REF: Virtual Table Constraint Operator Codes @@ -8331,6 +8473,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ +#define SQLITE_TESTCTRL_GETOPT 16 #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ #define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17 #define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 @@ -8350,7 +8493,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 /* NOT USED */ #define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* @@ -9326,6 +9469,16 @@ typedef struct sqlite3_backup sqlite3_backup; ** APIs are not strictly speaking threadsafe. If they are invoked at the ** same time as another thread is invoking sqlite3_backup_step() it is ** possible that they return invalid values. +** +** Alternatives To Using The Backup API +** +** Other techniques for safely creating a consistent backup of an SQLite +** database include: +** +**
      +**
    • The [VACUUM INTO] command. +**
    • The [sqlite3_rsync] utility program. +**
    */ SQLITE_API sqlite3_backup *sqlite3_backup_init( sqlite3 *pDest, /* Destination database handle */ @@ -10525,6 +10678,14 @@ typedef struct sqlite3_snapshot { ** If there is not already a read-transaction open on schema S when ** this function is called, one is opened automatically. ** +** If a read-transaction is opened by this function, then it is guaranteed +** that the returned snapshot object may not be invalidated by a database +** writer or checkpointer until after the read-transaction is closed. This +** is not guaranteed if a read-transaction is already open when this +** function is called. In that case, any subsequent write or checkpoint +** operation on the database may invalidate the returned snapshot handle, +** even while the read-transaction remains open. +** ** The following must be true for this function to succeed. If any of ** the following statements are false when sqlite3_snapshot_get() is ** called, SQLITE_ERROR is returned. The final value of *P is undefined @@ -10682,8 +10843,9 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c /* ** CAPI3REF: Serialize a database ** -** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory -** that is a serialization of the S database on [database connection] D. +** The sqlite3_serialize(D,S,P,F) interface returns a pointer to +** memory that is a serialization of the S database on +** [database connection] D. If S is a NULL pointer, the main database is used. ** If P is not a NULL pointer, then the size of the database in bytes ** is written into *P. ** @@ -10833,8 +10995,6 @@ SQLITE_API int sqlite3_deserialize( #if defined(__wasi__) # undef SQLITE_WASI # define SQLITE_WASI 1 -# undef SQLITE_OMIT_WAL -# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ # ifndef SQLITE_OMIT_LOAD_EXTENSION # define SQLITE_OMIT_LOAD_EXTENSION # endif @@ -10846,7 +11006,7 @@ SQLITE_API int sqlite3_deserialize( #ifdef __cplusplus } /* End of the 'extern "C"' block */ #endif -#endif /* SQLITE3_H */ +/* #endif for SQLITE3_H will be added by mksqlite3.tcl */ /******** Begin file sqlite3rtree.h *********/ /* @@ -13037,6 +13197,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -13093,19 +13257,57 @@ struct Fts5PhraseIter { ** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, ** output variable (*ppToken) is set to point to a buffer containing the ** matching document token, and (*pnToken) to the size of that buffer in -** bytes. This API is not available if the specified token matches a -** prefix query term. In that case both output variables are always set -** to 0. +** bytes. ** ** The output text is not a copy of the document text that was tokenized. ** It is the output of the tokenizer module. For tokendata=1 tables, this ** includes any embedded 0x00 and trailing data. ** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. 
If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. +** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. +** +** xColumnLocale(pFts5, iCol, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. (*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -13147,6 +13349,15 @@ struct Fts5ExtensionApi { const char **ppToken, int *pnToken ); int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -13167,7 +13378,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -13191,7 +13402,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate().
** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. This is always one of the following ** four values: ** @@ -13215,6 +13426,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). The third and fourth @@ -13238,6 +13456,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. +** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**
      +**
    • There is no "iVersion" field, and +**
    • The xTokenize() method does not take a locale argument. +**
    +** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -13346,6 +13588,33 @@ struct Fts5ExtensionApi { ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. +*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -13365,6 +13634,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -13384,7 +13654,7 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( @@ -13411,6 +13681,25 @@ struct fts5_api { fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* @@ -13424,103 +13713,8 @@ struct fts5_api { #endif /* _FTS5_H */ /******** End of fts5.h *********/ +#endif /* SQLITE3_H */ #else // USE_LIBSQLITE3 // If users really want to link against the system sqlite3 we // need to make this file a noop. - #endif -/* -** 2014-09-08 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the application interface definitions for the -** user-authentication extension feature. -** -** To compile with the user-authentication feature, append this file to -** end of an SQLite amalgamation header file ("sqlite3.h"), then add -** the SQLITE_USER_AUTHENTICATION compile-time option. 
See the -** user-auth.txt file in the same source directory as this file for -** additional information. -*/ -#ifdef SQLITE_USER_AUTHENTICATION - -#ifdef __cplusplus -extern "C" { -#endif - -/* -** If a database contains the SQLITE_USER table, then the -** sqlite3_user_authenticate() interface must be invoked with an -** appropriate username and password prior to enable read and write -** access to the database. -** -** Return SQLITE_OK on success or SQLITE_ERROR if the username/password -** combination is incorrect or unknown. -** -** If the SQLITE_USER table is not present in the database file, then -** this interface is a harmless no-op returnning SQLITE_OK. -*/ -int sqlite3_user_authenticate( - sqlite3 *db, /* The database connection */ - const char *zUsername, /* Username */ - const char *aPW, /* Password or credentials */ - int nPW /* Number of bytes in aPW[] */ -); - -/* -** The sqlite3_user_add() interface can be used (by an admin user only) -** to create a new user. When called on a no-authentication-required -** database, this routine converts the database into an authentication- -** required database, automatically makes the added user an -** administrator, and logs in the current connection as that user. -** The sqlite3_user_add() interface only works for the "main" database, not -** for any ATTACH-ed databases. Any call to sqlite3_user_add() by a -** non-admin user results in an error. -*/ -int sqlite3_user_add( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to be added */ - const char *aPW, /* Password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* True to give new user admin privilege */ -); - -/* -** The sqlite3_user_change() interface can be used to change a users -** login credentials or admin privilege. Any user can change their own -** login credentials. Only an admin user can change another users login -** credentials or admin privilege setting. No user may change their own -** admin privilege setting. -*/ -int sqlite3_user_change( - sqlite3 *db, /* Database connection */ - const char *zUsername, /* Username to change */ - const char *aPW, /* New password or credentials */ - int nPW, /* Number of bytes in aPW[] */ - int isAdmin /* Modified admin privilege for the user */ -); - -/* -** The sqlite3_user_delete() interface can be used (by an admin user only) -** to delete a user. The currently logged-in user cannot be deleted, -** which guarantees that there is always an admin user and hence that -** the database cannot be converted into a no-authentication-required -** database. -*/ -int sqlite3_user_delete( - sqlite3 *db, /* Database connection */ - const char *zUsername /* Username to remove */ -); - -#ifdef __cplusplus -} /* end of the 'extern "C"' block */ -#endif - -#endif /* SQLITE_USER_AUTHENTICATION */ + #endif \ No newline at end of file diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index ed2a9e2a..a967cab0 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -381,7 +381,7 @@ type SQLiteStmt struct { s *C.sqlite3_stmt t string closed bool - cls bool + cls bool // True if the statement was created by SQLiteConn.Query } // SQLiteResult implements sql.Result. @@ -393,12 +393,12 @@ type SQLiteResult struct { // SQLiteRows implements driver.Rows. 
type SQLiteRows struct { s *SQLiteStmt - nc int + nc int32 // Number of columns + cls bool // True if we need to close the parent statement in Close cols []string decltype []string - cls bool - closed bool ctx context.Context // no better alternative to pass context into Next() method + closemu sync.Mutex } type functionInfo struct { @@ -929,6 +929,7 @@ func (c *SQLiteConn) query(ctx context.Context, query string, args []driver.Name s.(*SQLiteStmt).cls = true na := s.NumInput() if len(args)-start < na { + s.Close() return nil, fmt.Errorf("not enough args to execute query: want %d got %d", na, len(args)-start) } // consume the number of arguments used in the current @@ -1875,6 +1876,9 @@ func (c *SQLiteConn) SetLimit(id int, newVal int) int { // This method is not thread-safe as the returned error code can be changed by // another call if invoked concurrently. // +// Use SetFileControlInt64 instead if the argument for the opcode is documented +// as a pointer to a sqlite3_int64. +// // See: sqlite3_file_control, https://www.sqlite.org/c3ref/file_control.html func (c *SQLiteConn) SetFileControlInt(dbName string, op int, arg int) error { if dbName == "" { @@ -1892,6 +1896,34 @@ func (c *SQLiteConn) SetFileControlInt(dbName string, op int, arg int) error { return nil } +// SetFileControlInt64 invokes the xFileControl method on a given database. The +// dbName is the name of the database. It will default to "main" if left blank. +// The op is one of the opcodes prefixed by "SQLITE_FCNTL_". The arg argument +// and return code are both opcode-specific. Please see the SQLite documentation. +// +// This method is not thread-safe as the returned error code can be changed by +// another call if invoked concurrently. +// +// Only use this method if the argument for the opcode is documented as a pointer +// to a sqlite3_int64. +// +// See: sqlite3_file_control, https://www.sqlite.org/c3ref/file_control.html +func (c *SQLiteConn) SetFileControlInt64(dbName string, op int, arg int64) error { + if dbName == "" { + dbName = "main" + } + + cDBName := C.CString(dbName) + defer C.free(unsafe.Pointer(cDBName)) + + cArg := C.sqlite3_int64(arg) + rv := C.sqlite3_file_control(c.db, cDBName, C.int(op), unsafe.Pointer(&cArg)) + if rv != C.SQLITE_OK { + return c.lastError() + } + return nil +} + // Close the statement. func (s *SQLiteStmt) Close() error { s.mu.Lock() @@ -2007,14 +2039,12 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive rows := &SQLiteRows{ s: s, - nc: int(C.sqlite3_column_count(s.s)), + nc: int32(C.sqlite3_column_count(s.s)), + cls: s.cls, cols: nil, decltype: nil, - cls: s.cls, - closed: false, ctx: ctx, } - runtime.SetFinalizer(rows, (*SQLiteRows).Close) return rows, nil } @@ -2111,24 +2141,28 @@ func (s *SQLiteStmt) Readonly() bool { // Close the rows. 
func (rc *SQLiteRows) Close() error { - rc.s.mu.Lock() - if rc.s.closed || rc.closed { - rc.s.mu.Unlock() + rc.closemu.Lock() + defer rc.closemu.Unlock() + s := rc.s + if s == nil { + return nil + } + rc.s = nil // remove reference to SQLiteStmt + s.mu.Lock() + if s.closed { + s.mu.Unlock() return nil } - rc.closed = true if rc.cls { - rc.s.mu.Unlock() - return rc.s.Close() + s.mu.Unlock() + return s.Close() } - rv := C.sqlite3_reset(rc.s.s) + rv := C.sqlite3_reset(s.s) if rv != C.SQLITE_OK { - rc.s.mu.Unlock() - return rc.s.c.lastError() + s.mu.Unlock() + return s.c.lastError() } - rc.s.mu.Unlock() - rc.s = nil - runtime.SetFinalizer(rc, nil) + s.mu.Unlock() return nil } @@ -2136,9 +2170,9 @@ func (rc *SQLiteRows) Close() error { func (rc *SQLiteRows) Columns() []string { rc.s.mu.Lock() defer rc.s.mu.Unlock() - if rc.s.s != nil && rc.nc != len(rc.cols) { + if rc.s.s != nil && int(rc.nc) != len(rc.cols) { rc.cols = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i))) } } @@ -2148,7 +2182,7 @@ func (rc *SQLiteRows) Columns() []string { func (rc *SQLiteRows) declTypes() []string { if rc.s.s != nil && rc.decltype == nil { rc.decltype = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))) } } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c index fc37b336..3a00f43d 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c @@ -5,7 +5,11 @@ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern int unlock_notify_wait(sqlite3 *db); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go index 76f7bbfb..3ac8050a 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go @@ -12,7 +12,11 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern void unlock_notify_callback(void *arg, int argc); */ diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c7582349..00000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. 
[GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. [GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. There is an example to showcase usage. -* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a5..00000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d..00000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca7..00000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. 
- potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. -func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. 
-func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5a..00000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 1efb22ac..00000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Package mapstructure exposes functionality to convert one arbitrary -// Go type into another, typically to convert a map[string]interface{} -// into a native Go structure. 
-// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". -// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. 
-// -// For example, the zero type of a numeric type is zero ("0"). If the struct -// field value is zero and a numeric type, the field is empty, and it won't -// be encoded into the destination type. -// -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } -// -// Unexported fields -// -// Since unexported (private) struct fields cannot be set outside the package -// where they are defined, the decoder will simply skip them. -// -// For this output type definition: -// -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } -// -// Using this map as input: -// -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } -// -// The following struct will be decoded: -// -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } -// -// Other Configuration -// -// mapstructure is highly configurable. See the DecoderConfig struct -// for other features and options that are supported. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or -// DecodeHookFuncValue. -// Values are a superset of Types (Values can return types), and Types are a -// superset of Kinds (Types can return Kinds) and are generally a richer thing -// to use, but Kinds are simpler if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target -// values. -type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. The - // DecodeHook is called for every map and value in the input. This means - // that if a struct has embedded fields with squash tags the decode hook - // is called only once with all of the input data, not once for each - // embedded struct. - // - // If an error is returned, the entire decode will fail with that error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). 
- ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. 
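As a usage reference for the API documented above, here is a minimal sketch of decoding a map into a struct with a renamed field (the input values are illustrative; the import path matches this vendored copy):

    package main

    import (
        "fmt"

        "github.com/mitchellh/mapstructure"
    )

    type User struct {
        // "user" is the key looked up in the source map, per the
        // "Renaming Fields" section of the package documentation.
        Username string `mapstructure:"user"`
    }

    func main() {
        input := map[string]interface{}{"user": "alice"}

        var u User
        // output must be a pointer to a map or struct, as noted above.
        if err := mapstructure.Decode(input, &u); err != nil {
            panic(err)
        }
        fmt.Println(u.Username) // alice
    }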
-func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. -func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - - if config.Metadata.Unset == nil { - config.Metadata.Unset = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - if config.MatchName == nil { - config.MatchName = strings.EqualFold - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. 
- if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. - var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - outputKind := getKind(outVal) - addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, outputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metainput. - if addMetaKey && d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - elem := val.Elem() - - // If we can't address this element, then its not writable. Instead, - // we make a copy of the value (which is a pointer and therefore - // writable), decode into that, and replace the whole value. - copied := false - if !elem.CanAddr() { - copied = true - - // Make *T - copy := reflect.New(elem.Type()) - - // *T = elem - copy.Elem().Set(elem) - - // Set elem so we decode into it - elem = copy - } - - // Decode. If we have an error then return. We also return right - // away if we're not a copy because that means we decoded directly. - if err := d.decode(name, data, elem); err != nil || !copied { - return err - } - - // If we're a copy, we need to set te final result - val.Set(elem.Elem()) - return nil - } - - dataVal := reflect.ValueOf(data) - - // If the input data is a pointer, and the assigned type is the dereference - // of that exact pointer, then indirect it so that we can assign it. 
- // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseInt(str, 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot 
parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseUint(str, 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := strconv.ParseUint(string(jn), 0, 64) - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetUint(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - f, err := strconv.ParseFloat(str, val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - name+"["+strconv.Itoa(i)+"]", - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := name + "[" + k.String() + "]" - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. 
- v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name - - if tagValue == "" && d.config.IgnoreUntaggedFields { - continue - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous - - v = dereferencePtrToStructIfNeeded(v, d.config.TagName) - - // Determine the name of the key in the map - if index := strings.Index(tagValue, ","); index != -1 { - if tagValue[:index] == "-" { - continue - } - // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { - continue - } - - // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 - if squash { - // When squashing, the embedded type can be a pointer to a struct. - if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { - v = v.Elem() - } - - // The final type must be a struct - if v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - } - if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { - keyName = keyNameTagValue - } - } else if len(tagValue) > 0 { - if tagValue == "-" { - continue - } - keyName = tagValue - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(vMap.Type()) - reflect.Indirect(addrVal).Set(vMap) - - err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) - if err != nil { - return err - } - - // the underlying map may have been completely overwritten so pull - // it indirectly out of the enclosing value. - vMap = reflect.Indirect(addrVal) - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return true, nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return false, err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return false, err - } - } - return false, nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // If we have a non array/slice type then we first attempt to convert. - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - - // If the input value is nil, then don't allocate since empty != nil - if dataValKind != reflect.Array && dataVal.IsNil() { - return nil - } - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Make a new slice to hold our result, same size as the original data. 
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. 
- - // Make a new map to hold our result - mapType := reflect.TypeOf((map[string]interface{})(nil)) - mval := reflect.MakeMap(mapType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(mval.Type()) - - reflect.Indirect(addrVal).Set(mval) - if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - - // remainField is set to a valid field set with the "remain" tag if - // we are keeping track of remaining values. - var remainField *field - - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldVal := structVal.Field(i) - if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { - // Handle embedded struct pointers as embedded structs. - fieldVal = fieldVal.Elem() - } - - // If "squash" is specified in the tag, we squash the field down. 
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous - remain := false - - // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - - if tag == "remain" { - remain = true - break - } - } - - if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { - structs = append(structs, fieldVal) - } - continue - } - - // Build our field - if remain { - remainField = &field{fieldType, fieldVal} - } else { - // Normal struct field, store it away - fields = append(fields, field{fieldType, fieldVal}) - } - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if d.config.MatchName(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Remember it for potential errors and metadata. - targetValKeysUnused[fieldName] = struct{}{} - continue - } - } - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = name + "." + fieldName - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - // If we have a "remain"-tagged field and we have unused keys then - // we put the unused keys directly into the remain field. - if remainField != nil && len(dataValKeysUnused) > 0 { - // Build a map of only the unused values - remain := map[interface{}]interface{}{} - for key := range dataValKeysUnused { - remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() - } - - // Decode it as-if we were just decoding this map onto our map. 
- if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) - } - - // Set the map to nil so we have none so that the next check will - // not error (ErrorUnused) - dataValKeysUnused = nil - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { - keys := make([]string, 0, len(targetValKeysUnused)) - for rawKey := range targetValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - for rawKey := range targetValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) - } - } - - return nil -} - -func isEmptyValue(v reflect.Value) bool { - switch getKind(v) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields - return true - } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside - return true - } - } - return false -} - -func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return v - } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref - } - return v -} diff --git a/vendor/github.com/moby/sys/capability/.codespellrc b/vendor/github.com/moby/sys/capability/.codespellrc deleted file mode 100644 index e874be56..00000000 --- a/vendor/github.com/moby/sys/capability/.codespellrc +++ /dev/null @@ -1,3 +0,0 @@ -[codespell] -skip = ./.git -ignore-words-list = nd diff --git a/vendor/github.com/moby/sys/capability/.golangci.yml b/vendor/github.com/moby/sys/capability/.golangci.yml deleted file 
mode 100644 index d775aadd..00000000 --- a/vendor/github.com/moby/sys/capability/.golangci.yml +++ /dev/null @@ -1,6 +0,0 @@ -linters: - enable: - - unconvert - - unparam - - gofumpt - - errorlint diff --git a/vendor/github.com/moby/sys/capability/CHANGELOG.md b/vendor/github.com/moby/sys/capability/CHANGELOG.md index 037ef010..299b36d9 100644 --- a/vendor/github.com/moby/sys/capability/CHANGELOG.md +++ b/vendor/github.com/moby/sys/capability/CHANGELOG.md @@ -5,6 +5,30 @@ from https://github.com/syndtr/gocapability/commit/42c35b4376354fd5. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.4.0] - 2024-11-11 + +### Added +* New separate API for ambient ([GetAmbient], [SetAmbient], [ResetAmbient]) + and bound ([GetBound], [DropBound]) capabilities, modelled after libcap. (#176) + +### Fixed +* [Apply] now returns an error if called for non-zero `pid`. Before this change, + it could silently change some capabilities of the current process, instead of + the one identified by the `pid`. (#168, #174) +* Fixed tests that change capabilities to be run in a separate process. (#173) +* Other improvements in tests. (#169, #170) + +### Changed +* Use raw syscalls (which are slightly faster). (#176) +* Most tests are now limited to testing the public API of the package. (#162) +* Simplify parsing /proc/*pid*/status, add a test case. (#162) +* Optimize the number of syscall to set ambient capabilities in Apply + by clearing them first; add a test case. (#163, #164) +* Better documentation for [Apply], [NewFile], [NewFile2], [NewPid], [NewPid2]. (#175) + +### Removed +* `.golangci.yml` and `.codespellrc` are no longer part of the package. (#158) + ## [0.3.0] - 2024-09-25 ### Added @@ -63,14 +87,24 @@ This is an initial release since the fork. * Removed init function so programs that use this package start faster. [#6] * Removed `CAP_LAST_CAP` (use [LastCap] instead). 
[#6] - + [Apply]: https://pkg.go.dev/github.com/moby/sys/capability#Capabilities.Apply +[DropBound]: https://pkg.go.dev/github.com/moby/sys/capability#DropBound +[GetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#GetAmbient +[GetBound]: https://pkg.go.dev/github.com/moby/sys/capability#GetBound [LastCap]: https://pkg.go.dev/github.com/moby/sys/capability#LastCap -[List]: https://pkg.go.dev/github.com/moby/sys/capability#List [ListKnown]: https://pkg.go.dev/github.com/moby/sys/capability#ListKnown [ListSupported]: https://pkg.go.dev/github.com/moby/sys/capability#ListSupported +[List]: https://pkg.go.dev/github.com/moby/sys/capability#List +[NewFile2]: https://pkg.go.dev/github.com/moby/sys/capability#NewFile2 +[NewFile]: https://pkg.go.dev/github.com/moby/sys/capability#NewFile +[NewPid2]: https://pkg.go.dev/github.com/moby/sys/capability#NewPid2 +[NewPid]: https://pkg.go.dev/github.com/moby/sys/capability#NewPid +[ResetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#ResetAmbient +[SetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#SetAmbient +[0.4.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.4.0 [0.3.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.3.0 [0.2.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.2.0 [0.1.1]: https://github.com/kolyshkin/capability/compare/v0.1.0...v0.1.1 diff --git a/vendor/github.com/moby/sys/capability/capability.go b/vendor/github.com/moby/sys/capability/capability.go index 1b36f5f2..11e47bed 100644 --- a/vendor/github.com/moby/sys/capability/capability.go +++ b/vendor/github.com/moby/sys/capability/capability.go @@ -56,16 +56,16 @@ type Capabilities interface { // outstanding changes. Load() error - // Apply apply the capabilities settings, so all changes will take - // effect. + // Apply apply the capabilities settings, so all changes made by + // [Set], [Unset], [Fill], or [Clear] will take effect. Apply(kind CapType) error } // NewPid initializes a new [Capabilities] object for given pid when // it is nonzero, or for the current process if pid is 0. // -// Deprecated: Replace with [NewPid2] followed by [Capabilities.Load]. -// For example, replace: +// Deprecated: replace with [NewPid2] followed by optional [Capabilities.Load] +// (only if needed). For example, replace: // // c, err := NewPid(0) // if err != nil { @@ -93,16 +93,16 @@ func NewPid(pid int) (Capabilities, error) { // NewPid2 initializes a new [Capabilities] object for given pid when // it is nonzero, or for the current process if pid is 0. This -// does not load the process's current capabilities; to do that you -// must call [Capabilities.Load] explicitly. +// does not load the process's current capabilities; if needed, +// call [Capabilities.Load]. func NewPid2(pid int) (Capabilities, error) { return newPid(pid) } // NewFile initializes a new Capabilities object for given file path. // -// Deprecated: Replace with [NewFile2] followed by [Capabilities.Load]. -// For example, replace: +// Deprecated: replace with [NewFile2] followed by optional [Capabilities.Load] +// (only if needed). For example, replace: // // c, err := NewFile(path) // if err != nil { @@ -130,7 +130,7 @@ func NewFile(path string) (Capabilities, error) { // NewFile2 creates a new initialized [Capabilities] object for given // file path. This does not load the process's current capabilities; -// to do that you must call [Capabilities.Load] explicitly. +// if needed, call [Capabilities.Load]. 
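The deprecation notes above describe the intended migration; a minimal sketch, assuming the package is imported from github.com/moby/sys/capability and that the caller actually needs the current sets loaded:

    // Inspect the capabilities of the current process (pid 0).
    c, err := capability.NewPid2(0)
    if err != nil {
        return err
    }
    // NewPid2 does not read anything from the kernel; call Load only
    // when the current capability sets are needed.
    if err := c.Load(); err != nil {
        return err
    }

Splitting construction from loading this way avoids an unnecessary Load when it isn't needed, per the "(only if needed)" wording above.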
func NewFile2(path string) (Capabilities, error) { return newFile(path) } @@ -142,3 +142,35 @@ func NewFile2(path string) (Capabilities, error) { func LastCap() (Cap, error) { return lastCap() } + +// GetAmbient determines if a specific ambient capability is raised in the +// calling thread. +func GetAmbient(c Cap) (bool, error) { + return getAmbient(c) +} + +// SetAmbient raises or lowers specified ambient capabilities for the calling +// thread. To complete successfully, the prevailing effective capability set +// must have a raised CAP_SETPCAP. Further, to raise a specific ambient +// capability the inheritable and permitted sets of the calling thread must +// already contain the specified capability. +func SetAmbient(raise bool, caps ...Cap) error { + return setAmbient(raise, caps...) +} + +// ResetAmbient resets all of the ambient capabilities for the calling thread +// to their lowered value. +func ResetAmbient() error { + return resetAmbient() +} + +// GetBound determines if a specific bounding capability is raised in the +// calling thread. +func GetBound(c Cap) (bool, error) { + return getBound(c) +} + +// DropBound lowers the specified bounding set capability. +func DropBound(caps ...Cap) error { + return dropBound(caps...) +} diff --git a/vendor/github.com/moby/sys/capability/capability_linux.go b/vendor/github.com/moby/sys/capability/capability_linux.go index aa600e1d..234b1efb 100644 --- a/vendor/github.com/moby/sys/capability/capability_linux.go +++ b/vendor/github.com/moby/sys/capability/capability_linux.go @@ -117,6 +117,13 @@ func newPid(pid int) (c Capabilities, retErr error) { return } +func ignoreEINVAL(err error) error { + if errors.Is(err, syscall.EINVAL) { + err = nil + } + return err +} + type capsV3 struct { hdr capHeader data [2]capData @@ -307,15 +314,15 @@ func (c *capsV3) Load() (err error) { } break } - if strings.HasPrefix(line, "CapB") { - _, err = fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0]) + if val, ok := strings.CutPrefix(line, "CapBnd:\t"); ok { + _, err = fmt.Sscanf(val, "%08x%08x", &c.bounds[1], &c.bounds[0]) if err != nil { break } continue } - if strings.HasPrefix(line, "CapA") { - _, err = fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0]) + if val, ok := strings.CutPrefix(line, "CapAmb:\t"); ok { + _, err = fmt.Sscanf(val, "%08x%08x", &c.ambient[1], &c.ambient[0]) if err != nil { break } @@ -327,7 +334,10 @@ func (c *capsV3) Load() (err error) { return } -func (c *capsV3) Apply(kind CapType) (err error) { +func (c *capsV3) Apply(kind CapType) error { + if c.hdr.pid != 0 { + return errors.New("unable to modify capabilities of another process") + } last, err := LastCap() if err != nil { return err @@ -336,21 +346,17 @@ func (c *capsV3) Apply(kind CapType) (err error) { var data [2]capData err = capget(&c.hdr, &data[0]) if err != nil { - return + return err } if (1< 0, nil +} + +func setAmbient(raise bool, caps ...Cap) error { + op := pr_CAP_AMBIENT_RAISE + if !raise { + op = pr_CAP_AMBIENT_LOWER + } + for _, val := range caps { + err := prctl(pr_CAP_AMBIENT, op, uintptr(val)) + if err != nil { + return err + } + } + return nil +} + +func resetAmbient() error { + return prctl(pr_CAP_AMBIENT, pr_CAP_AMBIENT_CLEAR_ALL, 0) +} + +func getBound(c Cap) (bool, error) { + res, err := prctlRetInt(syscall.PR_CAPBSET_READ, uintptr(c), 0) + if err != nil { + return false, err + } + return res > 0, nil +} + +func dropBound(caps ...Cap) error { + for _, val := range caps { + err := prctl(syscall.PR_CAPBSET_DROP, uintptr(val), 0) 
+ if err != nil { + return err + } + } + return nil } func newFile(path string) (c Capabilities, err error) { diff --git a/vendor/github.com/moby/sys/capability/capability_noop.go b/vendor/github.com/moby/sys/capability/capability_noop.go index ba819ff0..b766e444 100644 --- a/vendor/github.com/moby/sys/capability/capability_noop.go +++ b/vendor/github.com/moby/sys/capability/capability_noop.go @@ -24,3 +24,23 @@ func newFile(_ string) (Capabilities, error) { func lastCap() (Cap, error) { return -1, errNotSup } + +func getAmbient(_ Cap) (bool, error) { + return false, errNotSup +} + +func setAmbient(_ bool, _ ...Cap) error { + return errNotSup +} + +func resetAmbient() error { + return errNotSup +} + +func getBound(_ Cap) (bool, error) { + return false, errNotSup +} + +func dropBound(_ ...Cap) error { + return errNotSup +} diff --git a/vendor/github.com/moby/sys/capability/enum.go b/vendor/github.com/moby/sys/capability/enum.go index f89f0273..f8859331 100644 --- a/vendor/github.com/moby/sys/capability/enum.go +++ b/vendor/github.com/moby/sys/capability/enum.go @@ -316,7 +316,7 @@ func ListKnown() []Cap { return list() } -// ListSupported retuns the list of all capabilities known to the package, +// ListSupported returns the list of all capabilities known to the package, // except those that are not supported by the currently running Linux kernel. func ListSupported() ([]Cap, error) { last, err := LastCap() diff --git a/vendor/github.com/moby/sys/capability/syscall_linux.go b/vendor/github.com/moby/sys/capability/syscall_linux.go index d6b6932a..2d8faa85 100644 --- a/vendor/github.com/moby/sys/capability/syscall_linux.go +++ b/vendor/github.com/moby/sys/capability/syscall_linux.go @@ -24,7 +24,7 @@ type capData struct { } func capget(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + _, _, e1 := syscall.RawSyscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) if e1 != 0 { err = e1 } @@ -32,7 +32,7 @@ func capget(hdr *capHeader, data *capData) (err error) { } func capset(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + _, _, e1 := syscall.RawSyscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) if e1 != 0 { err = e1 } @@ -48,14 +48,22 @@ const ( pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4) ) -func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { - _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) +func prctl(option int, arg2, arg3 uintptr) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_PRCTL, uintptr(option), arg2, arg3) if e1 != 0 { err = e1 } return } +func prctlRetInt(option int, arg2, arg3 uintptr) (int, error) { + ret, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, uintptr(option), arg2, arg3) + if err != 0 { + return 0, err + } + return int(ret), nil +} + const ( vfsXattrName = "security.capability" @@ -92,7 +100,7 @@ func getVfsCap(path string, dest *vfscapData) (err error) { if err != nil { return } - r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) + r0, _, e1 := syscall.RawSyscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), 
vfscapDataSizeV2, 0, 0) if e1 != 0 { if e1 == syscall.ENODATA { dest.version = 2 @@ -145,7 +153,7 @@ func setVfsCap(path string, data *vfscapData) (err error) { } else { return syscall.EINVAL } - _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) + _, _, e1 := syscall.RawSyscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) if e1 != 0 { err = e1 } diff --git a/vendor/github.com/moby/sys/user/idtools.go b/vendor/github.com/moby/sys/user/idtools.go new file mode 100644 index 00000000..595b7a92 --- /dev/null +++ b/vendor/github.com/moby/sys/user/idtools.go @@ -0,0 +1,141 @@ +package user + +import ( + "fmt" + "os" +) + +// MkdirOpt is a type for options to pass to Mkdir calls +type MkdirOpt func(*mkdirOptions) + +type mkdirOptions struct { + onlyNew bool +} + +// WithOnlyNew is an option for MkdirAllAndChown that will only change ownership and permissions +// on newly created directories. If the directory already exists, it will not be modified +func WithOnlyNew(o *mkdirOptions) { + o.onlyNew = true +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. By default, if the directory already exists, this +// function will still change ownership and permissions. If WithOnlyNew is passed as an +// option, then only the newly created directories will have ownership and permissions changed. +func MkdirAllAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + + return mkdirAs(path, mode, uid, gid, true, options.onlyNew) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// By default, if the directory already exists, this function still changes ownership and permissions. +// If WithOnlyNew is passed as an option, then only the newly created directory will have ownership +// and permissions changed. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { + var options mkdirOptions + for _, opt := range opts { + opt(&options) + } + return mkdirAs(path, mode, uid, gid, false, options.onlyNew) +} + +// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (int64(hostID) >= m.ParentID) && (int64(hostID) <= (m.ParentID + m.Count - 1)) { + contID := int(m.ID + (int64(hostID) - m.ParentID)) + return contID, nil + } + } + return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (int64(contID) >= m.ID) && (int64(contID) <= (m.ID + m.Count - 1)) { + hostID := int(m.ParentID + (int64(contID) - m.ID)) + return hostID, nil + } + } + return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) +} + +// IdentityMapping contains a mappings of UIDs and GIDs. +// The zero value represents an empty mapping. +type IdentityMapping struct { + UIDMaps []IDMap `json:"UIDMaps"` + GIDMaps []IDMap `json:"GIDMaps"` +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i IdentityMapping) RootPair() (int, int) { + uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps) + return uid, gid +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +func (i IdentityMapping) ToHost(uid, gid int) (int, int, error) { + var err error + ruid, rgid := i.RootPair() + + if uid != ruid { + ruid, err = toHost(uid, i.UIDMaps) + if err != nil { + return ruid, rgid, err + } + } + + if gid != rgid { + rgid, err = toHost(gid, i.GIDMaps) + } + return ruid, rgid, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i IdentityMapping) ToContainer(uid, gid int) (int, int, error) { + ruid, err := toContainer(uid, i.UIDMaps) + if err != nil { + return -1, -1, err + } + rgid, err := toContainer(gid, i.GIDMaps) + return ruid, rgid, err +} + +// Empty returns true if there are no id mappings +func (i IdentityMapping) Empty() bool { + return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 +} diff --git a/vendor/github.com/moby/sys/user/idtools_unix.go b/vendor/github.com/moby/sys/user/idtools_unix.go new file mode 100644 index 00000000..4e39d244 --- /dev/null +++ b/vendor/github.com/moby/sys/user/idtools_unix.go @@ -0,0 +1,143 @@ +//go:build !windows + +package user + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "syscall" +) + +func mkdirAs(path string, mode os.FileMode, uid, gid int, mkAll, onlyNew bool) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if onlyNew { + return nil + } + + // short-circuit -- we were called with an existing directory and chown was requested + return setPermissions(path, mode, uid, gid, stat) + } + + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them 
properly at the end. If onlyNew is true, we won't + // chown the full directory path if it exists + var paths []string + if os.IsNotExist(err) { + paths = append(paths, path) + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err = os.Stat(dirPath); os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err = os.MkdirAll(path, mode); err != nil { + return err + } + } else if err = os.Mkdir(path, mode); err != nil { + return err + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err = setPermissions(pathComponent, mode, uid, gid, nil); err != nil { + return err + } + } + return nil +} + +// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +// Likewise for setting permissions. +func setPermissions(p string, mode os.FileMode, uid, gid int, stat os.FileInfo) error { + if stat == nil { + var err error + stat, err = os.Stat(p) + if err != nil { + return err + } + } + if stat.Mode().Perm() != mode.Perm() { + if err := os.Chmod(p, mode.Perm()); err != nil { + return err + } + } + ssi := stat.Sys().(*syscall.Stat_t) + if ssi.Uid == uint32(uid) && ssi.Gid == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { + // TODO: Consider adding support for calling out to "getent" + usr, err := LookupUser(name) + if err != nil { + return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %w", name, err) + } + + subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr) + if err != nil { + return IdentityMapping{}, err + } + subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr) + if err != nil { + return IdentityMapping{}, err + } + + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, + }, nil +} + +func lookupSubRangesFile(path string, usr User) ([]IDMap, error) { + uidstr := strconv.Itoa(usr.Uid) + rangeList, err := ParseSubIDFileFilter(path, func(sid SubID) bool { + return sid.Name == usr.Name || sid.Name == uidstr + }) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name) + } + + idMap := []IDMap{} + + var containerID int64 + for _, idrange := range rangeList { + idMap = append(idMap, IDMap{ + ID: containerID, + ParentID: idrange.SubID, + Count: idrange.Count, + }) + containerID = containerID + idrange.Count + } + return idMap, nil +} diff --git a/vendor/github.com/moby/sys/user/idtools_windows.go b/vendor/github.com/moby/sys/user/idtools_windows.go new file mode 100644 index 00000000..9de730ca --- /dev/null +++ b/vendor/github.com/moby/sys/user/idtools_windows.go @@ -0,0 +1,13 @@ +package user + +import ( + "os" +) + +// This is currently a wrapper around [os.MkdirAll] since currently +// permissions aren't set through this path, the identity isn't utilized. 
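A hedged usage sketch of the exported helpers from the new `idtools.go` above; the path and the remapped IDs are made-up examples, and the chown step needs sufficient privileges to succeed:

```go
package main

import (
	"log"

	"github.com/moby/sys/user"
)

func main() {
	// Create /var/lib/demo and any missing parents owned by the
	// remapped root 100000:100000 (hypothetical values). WithOnlyNew
	// leaves directories that already exist untouched, per its
	// documentation above.
	err := user.MkdirAllAndChown("/var/lib/demo", 0o755, 100000, 100000, user.WithOnlyNew)
	if err != nil {
		log.Fatal(err)
	}
}
```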
+// Ownership is handled elsewhere, but in the future could be support here +// too. +func mkdirAs(path string, _ os.FileMode, _, _ int, _, _ bool) error { + return os.MkdirAll(path, 0) +} diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore deleted file mode 100644 index c92c4d56..00000000 --- a/vendor/github.com/oklog/ulid/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -#### joe made this: http://goel.io/joe - -#####=== Go ===##### - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml deleted file mode 100644 index 43eb762f..00000000 --- a/vendor/github.com/oklog/ulid/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.10.x -install: - - go get -v github.com/golang/lint/golint - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - go get -d -t -v ./... - - go build -v ./... -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -v -race ./... - - go test -v -covermode=count -coverprofile=cov.out - - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md deleted file mode 100644 index 95581c78..00000000 --- a/vendor/github.com/oklog/ulid/AUTHORS.md +++ /dev/null @@ -1,2 +0,0 @@ -- Peter Bourgon (@peterbourgon) -- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md deleted file mode 100644 index 8da38c6b..00000000 --- a/vendor/github.com/oklog/ulid/CHANGELOG.md +++ /dev/null @@ -1,33 +0,0 @@ -## 1.3.1 / 2018-10-02 - -* Use underlying entropy source for random increments in Monotonic (#32) - -## 1.3.0 / 2018-09-29 - -* Monotonic entropy support (#31) - -## 1.2.0 / 2018-09-09 - -* Add a function to convert Unix time in milliseconds back to time.Time (#30) - -## 1.1.0 / 2018-08-15 - -* Ensure random part is always read from the entropy reader in full (#28) - -## 1.0.0 / 2018-07-29 - -* Add ParseStrict and MustParseStrict functions (#26) -* Enforce overflow checking when parsing (#20) - -## 0.3.0 / 2017-01-03 - -* Implement ULID.Compare method - -## 0.2.0 / 2016-12-13 - -* Remove year 2262 Timestamp bug. (#1) -* Gracefully handle invalid encodings when parsing. - -## 0.1.0 / 2016-12-06 - -* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md deleted file mode 100644 index 68f03f26..00000000 --- a/vendor/github.com/oklog/ulid/CONTRIBUTING.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contributing - -We use GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. - -* If you plan to do something more involved, first propose your ideas - in a Github issue. This will avoid unnecessary work and surely give - you and us a good deal of inspiration. 
- -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock deleted file mode 100644 index 349b449a..00000000 --- a/vendor/github.com/oklog/ulid/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/pborman/getopt" - packages = ["v2"] - revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml deleted file mode 100644 index 624a7a01..00000000 --- a/vendor/github.com/oklog/ulid/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md deleted file mode 100644 index 0a3d2f82..00000000 --- a/vendor/github.com/oklog/ulid/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# Universally Unique Lexicographically Sortable Identifier - -![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg) -[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid) -[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid) -[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master) -[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid) -[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE) - -A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented. 
- -## Background - -A GUID/UUID can be suboptimal for many use-cases because: - -- It isn't the most character efficient way of encoding 128 bits -- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address -- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures -- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures - -A ULID however: - -- Is compatible with UUID/GUID's -- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact) -- Lexicographically sortable -- Canonically encoded as a 26 character string, as opposed to the 36 character UUID -- Uses Crockford's base32 for better efficiency and readability (5 bits per character) -- Case insensitive -- No special characters (URL safe) -- Monotonic sort order (correctly detects and handles the same millisecond) - -## Install - -```shell -go get github.com/oklog/ulid -``` - -## Usage - -An ULID is constructed with a `time.Time` and an `io.Reader` entropy source. -This design allows for greater flexibility in choosing your trade-offs. - -Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use. -Instantiate one per long living go-routine or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source` as its been frequently observed in the package level functions. - - -```go -func ExampleULID() { - t := time.Unix(1000000, 0) - entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0) - fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy)) - // Output: 0000XSNJG0MQJHBF4QX1EFD6Y3 -} - -``` - -## Specification - -Below is the current specification of ULID as implemented in this repository. - -### Components - -**Timestamp** -- 48 bits -- UNIX-time in milliseconds -- Won't run out of space till the year 10895 AD - -**Entropy** -- 80 bits -- User defined entropy source. -- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic) - -### Encoding - -[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown. -This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse. - -``` -0123456789ABCDEFGHJKMNPQRSTVWXYZ -``` - -### Binary Layout and Byte Order - -The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order). - -``` -0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_time_high | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 16_bit_uint_time_low | 16_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -``` - -### String Representation - -``` - 01AN4Z07BY 79KA1307SR9X4MV3 -|----------| |----------------| - Timestamp Entropy - 10 chars 16 chars - 48bits 80bits - base32 base32 -``` - -## Test - -```shell -go test ./... 
-``` - -## Benchmarks - -On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 - -``` -BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op -BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op -BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op -BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op -BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op -BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op -BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op -BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op -BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op -BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op -BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op -``` - -## Prior Art - -- [alizain/ulid](https://github.com/alizain/ulid) -- [RobThree/NUlid](https://github.com/RobThree/NUlid) -- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go deleted file mode 100644 index c5d0d66f..00000000 --- a/vendor/github.com/oklog/ulid/ulid.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2016 The Oklog Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ulid - -import ( - "bufio" - "bytes" - "database/sql/driver" - "encoding/binary" - "errors" - "io" - "math" - "math/bits" - "math/rand" - "time" -) - -/* -An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier - - The components are encoded as 16 octets. - Each component is encoded with the MSB first (network byte order). 
- - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_time_high | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 16_bit_uint_time_low | 16_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -*/ -type ULID [16]byte - -var ( - // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong - // data size. - ErrDataSize = errors.New("ulid: bad data size when unmarshaling") - - // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with - // invalid Base32 encodings. - ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") - - // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient - // size. - ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") - - // ErrBigTime is returned when constructing an ULID with a time that is larger - // than MaxTime. - ErrBigTime = errors.New("ulid: time too big") - - // ErrOverflow is returned when unmarshaling a ULID whose first character is - // larger than 7, thereby exceeding the valid bit depth of 128. - ErrOverflow = errors.New("ulid: overflow when unmarshaling") - - // ErrMonotonicOverflow is returned by a Monotonic entropy source when - // incrementing the previous ULID's entropy bytes would result in overflow. - ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") - - // ErrScanValue is returned when the value passed to scan cannot be unmarshaled - // into the ULID. - ErrScanValue = errors.New("ulid: source value must be a string or byte slice") -) - -// New returns an ULID with the given Unix milliseconds timestamp and an -// optional entropy source. Use the Timestamp function to convert -// a time.Time to Unix milliseconds. -// -// ErrBigTime is returned when passing a timestamp bigger than MaxTime. -// Reading from the entropy source may also return an error. -func New(ms uint64, entropy io.Reader) (id ULID, err error) { - if err = id.SetTime(ms); err != nil { - return id, err - } - - switch e := entropy.(type) { - case nil: - return id, err - case *monotonic: - err = e.MonotonicRead(ms, id[6:]) - default: - _, err = io.ReadFull(e, id[6:]) - } - - return id, err -} - -// MustNew is a convenience function equivalent to New that panics on failure -// instead of returning an error. -func MustNew(ms uint64, entropy io.Reader) ULID { - id, err := New(ms, entropy) - if err != nil { - panic(err) - } - return id -} - -// Parse parses an encoded ULID, returning an error in case of failure. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. For a version that -// returns an error instead, see ParseStrict. -func Parse(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), false, &id) -} - -// ParseStrict parses an encoded ULID, returning an error in case of failure. -// -// It is like Parse, but additionally validates that the parsed ULID consists -// only of valid base32 characters. It is slightly slower than Parse. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings return ErrInvalidCharacters. 
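For reference, a quick sketch of the `Parse` vs `ParseStrict` distinction documented above, using the upstream `github.com/oklog/ulid` module that this patch drops from vendor; the sample string is hypothetical, chosen because `U` is outside Crockford base32:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	// 26 characters, but the trailing 'U' is excluded from the
	// alphabet (along with I, L, and O).
	s := "0000XSNJG0MQJHBF4QX1EFD6YU"

	id, err := ulid.Parse(s) // lenient: no error, undefined ULID
	fmt.Println(id, err)

	_, err = ulid.ParseStrict(s) // strict: rejects bad characters
	fmt.Println(err == ulid.ErrInvalidCharacters) // true
}
```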
-func ParseStrict(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), true, &id) -} - -func parse(v []byte, strict bool, id *ULID) error { - // Check if a base32 encoded ULID is the right length. - if len(v) != EncodedSize { - return ErrDataSize - } - - // Check if all the characters in a base32 encoded ULID are part of the - // expected base32 character set. - if strict && - (dec[v[0]] == 0xFF || - dec[v[1]] == 0xFF || - dec[v[2]] == 0xFF || - dec[v[3]] == 0xFF || - dec[v[4]] == 0xFF || - dec[v[5]] == 0xFF || - dec[v[6]] == 0xFF || - dec[v[7]] == 0xFF || - dec[v[8]] == 0xFF || - dec[v[9]] == 0xFF || - dec[v[10]] == 0xFF || - dec[v[11]] == 0xFF || - dec[v[12]] == 0xFF || - dec[v[13]] == 0xFF || - dec[v[14]] == 0xFF || - dec[v[15]] == 0xFF || - dec[v[16]] == 0xFF || - dec[v[17]] == 0xFF || - dec[v[18]] == 0xFF || - dec[v[19]] == 0xFF || - dec[v[20]] == 0xFF || - dec[v[21]] == 0xFF || - dec[v[22]] == 0xFF || - dec[v[23]] == 0xFF || - dec[v[24]] == 0xFF || - dec[v[25]] == 0xFF) { - return ErrInvalidCharacters - } - - // Check if the first character in a base32 encoded ULID will overflow. This - // happens because the base32 representation encodes 130 bits, while the - // ULID is only 128 bits. - // - // See https://github.com/oklog/ulid/issues/9 for details. - if v[0] > '7' { - return ErrOverflow - } - - // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) - // to decode a base32 ULID. - - // 6 bytes timestamp (48 bits) - (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) - (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) - (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) - (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) - (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) - (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) - - // 10 bytes of entropy (80 bits) - (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) - (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) - (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) - (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) - (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) - (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) - (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) - (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) - (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) - (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) - - return nil -} - -// MustParse is a convenience function equivalent to Parse that panics on failure -// instead of returning an error. -func MustParse(ulid string) ULID { - id, err := Parse(ulid) - if err != nil { - panic(err) - } - return id -} - -// MustParseStrict is a convenience function equivalent to ParseStrict that -// panics on failure instead of returning an error. -func MustParseStrict(ulid string) ULID { - id, err := ParseStrict(ulid) - if err != nil { - panic(err) - } - return id -} - -// String returns a lexicographically sortable string encoded ULID -// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3 -// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy -func (id ULID) String() string { - ulid := make([]byte, EncodedSize) - _ = id.MarshalTextTo(ulid) - return string(ulid) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface by -// returning the ULID as a byte slice. 
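A short round-trip sketch of the binary codec described above, again against the upstream module; the sample ULID string is the one from the README earlier in this diff:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	id := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")

	// 16 raw bytes: 6-byte big-endian timestamp, then 10 bytes of entropy.
	b, err := id.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var out ulid.ULID
	if err := out.UnmarshalBinary(b); err != nil {
		panic(err)
	}
	fmt.Println(out == id) // true: ULID is a comparable [16]byte array
}
```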
-func (id ULID) MarshalBinary() ([]byte, error) { - ulid := make([]byte, len(id)) - return ulid, id.MarshalBinaryTo(ulid) -} - -// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. -// ErrBufferSize is returned when the len(dst) != 16. -func (id ULID) MarshalBinaryTo(dst []byte) error { - if len(dst) != len(id) { - return ErrBufferSize - } - - copy(dst, id[:]) - return nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by -// copying the passed data and converting it to an ULID. ErrDataSize is -// returned if the data length is different from ULID length. -func (id *ULID) UnmarshalBinary(data []byte) error { - if len(data) != len(*id) { - return ErrDataSize - } - - copy((*id)[:], data) - return nil -} - -// Encoding is the base 32 encoding alphabet used in ULID strings. -const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" - -// MarshalText implements the encoding.TextMarshaler interface by -// returning the string encoded ULID. -func (id ULID) MarshalText() ([]byte, error) { - ulid := make([]byte, EncodedSize) - return ulid, id.MarshalTextTo(ulid) -} - -// MarshalTextTo writes the ULID as a string to the given buffer. -// ErrBufferSize is returned when the len(dst) != 26. -func (id ULID) MarshalTextTo(dst []byte) error { - // Optimized unrolled loop ahead. - // From https://github.com/RobThree/NUlid - - if len(dst) != EncodedSize { - return ErrBufferSize - } - - // 10 byte timestamp - dst[0] = Encoding[(id[0]&224)>>5] - dst[1] = Encoding[id[0]&31] - dst[2] = Encoding[(id[1]&248)>>3] - dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] - dst[4] = Encoding[(id[2]&62)>>1] - dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] - dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] - dst[7] = Encoding[(id[4]&124)>>2] - dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] - dst[9] = Encoding[id[5]&31] - - // 16 bytes of entropy - dst[10] = Encoding[(id[6]&248)>>3] - dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] - dst[12] = Encoding[(id[7]&62)>>1] - dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] - dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] - dst[15] = Encoding[(id[9]&124)>>2] - dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] - dst[17] = Encoding[id[10]&31] - dst[18] = Encoding[(id[11]&248)>>3] - dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] - dst[20] = Encoding[(id[12]&62)>>1] - dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] - dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] - dst[23] = Encoding[(id[14]&124)>>2] - dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] - dst[25] = Encoding[id[15]&31] - - return nil -} - -// Byte to index table for O(1) lookups when unmarshaling. -// We use 0xFF as sentinel value for invalid indexes. 
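The overflow rule in `parse` above (a first character greater than '7' would encode more than 128 bits) has a compact observable consequence; a sketch against the upstream module, with a hypothetical input:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	// All 26 characters are valid base32, but the leading '8'
	// exceeds the 128-bit capacity of a ULID.
	_, err := ulid.ParseStrict("80000000000000000000000000")
	fmt.Println(err == ulid.ErrOverflow) // true
}
```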
-var dec = [...]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, - 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, - 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, -} - -// EncodedSize is the length of a text encoded ULID. -const EncodedSize = 26 - -// UnmarshalText implements the encoding.TextUnmarshaler interface by -// parsing the data as string encoded ULID. -// -// ErrDataSize is returned if the len(v) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. -func (id *ULID) UnmarshalText(v []byte) error { - return parse(v, false, id) -} - -// Time returns the Unix time in milliseconds encoded in the ULID. -// Use the top level Time function to convert the returned value to -// a time.Time. -func (id ULID) Time() uint64 { - return uint64(id[5]) | uint64(id[4])<<8 | - uint64(id[3])<<16 | uint64(id[2])<<24 | - uint64(id[1])<<32 | uint64(id[0])<<40 -} - -// maxTime is the maximum Unix time in milliseconds that can be -// represented in an ULID. -var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() - -// MaxTime returns the maximum Unix time in milliseconds that -// can be encoded in an ULID. -func MaxTime() uint64 { return maxTime } - -// Now is a convenience function that returns the current -// UTC time in Unix milliseconds. Equivalent to: -// Timestamp(time.Now().UTC()) -func Now() uint64 { return Timestamp(time.Now().UTC()) } - -// Timestamp converts a time.Time to Unix milliseconds. -// -// Because of the way ULID stores time, times from the year -// 10889 produces undefined results. -func Timestamp(t time.Time) uint64 { - return uint64(t.Unix())*1000 + - uint64(t.Nanosecond()/int(time.Millisecond)) -} - -// Time converts Unix milliseconds in the format -// returned by the Timestamp function to a time.Time. -func Time(ms uint64) time.Time { - s := int64(ms / 1e3) - ns := int64((ms % 1e3) * 1e6) - return time.Unix(s, ns) -} - -// SetTime sets the time component of the ULID to the given Unix time -// in milliseconds. 
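A small sketch of the `Timestamp`/`Time` pair documented above (upstream module; the instant is arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	t := time.Unix(1000000, int64(500*time.Millisecond))
	ms := ulid.Timestamp(t)                 // 1000000500 Unix milliseconds
	fmt.Println(ms, ulid.Time(ms).Equal(t)) // round-trips at millisecond precision
}
```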
-func (id *ULID) SetTime(ms uint64) error { - if ms > maxTime { - return ErrBigTime - } - - (*id)[0] = byte(ms >> 40) - (*id)[1] = byte(ms >> 32) - (*id)[2] = byte(ms >> 24) - (*id)[3] = byte(ms >> 16) - (*id)[4] = byte(ms >> 8) - (*id)[5] = byte(ms) - - return nil -} - -// Entropy returns the entropy from the ULID. -func (id ULID) Entropy() []byte { - e := make([]byte, 10) - copy(e, id[6:]) - return e -} - -// SetEntropy sets the ULID entropy to the passed byte slice. -// ErrDataSize is returned if len(e) != 10. -func (id *ULID) SetEntropy(e []byte) error { - if len(e) != 10 { - return ErrDataSize - } - - copy((*id)[6:], e) - return nil -} - -// Compare returns an integer comparing id and other lexicographically. -// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. -func (id ULID) Compare(other ULID) int { - return bytes.Compare(id[:], other[:]) -} - -// Scan implements the sql.Scanner interface. It supports scanning -// a string or byte slice. -func (id *ULID) Scan(src interface{}) error { - switch x := src.(type) { - case nil: - return nil - case string: - return id.UnmarshalText([]byte(x)) - case []byte: - return id.UnmarshalBinary(x) - } - - return ErrScanValue -} - -// Value implements the sql/driver.Valuer interface. This returns the value -// represented as a byte slice. If instead a string is desirable, a wrapper -// type can be created that calls String(). -// -// // stringValuer wraps a ULID as a string-based driver.Valuer. -// type stringValuer ULID -// -// func (id stringValuer) Value() (driver.Value, error) { -// return ULID(id).String(), nil -// } -// -// // Example usage. -// db.Exec("...", stringValuer(id)) -func (id ULID) Value() (driver.Value, error) { - return id.MarshalBinary() -} - -// Monotonic returns an entropy source that is guaranteed to yield -// strictly increasing entropy bytes for the same ULID timestamp. -// On conflicts, the previous ULID entropy is incremented with a -// random number between 1 and `inc` (inclusive). -// -// The provided entropy source must actually yield random bytes or else -// monotonic reads are not guaranteed to terminate, since there isn't -// enough randomness to compute an increment number. -// -// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. -// The lower the value of `inc`, the easier the next ULID within the -// same millisecond is to guess. If your code depends on ULIDs having -// secure entropy bytes, then don't go under this default unless you know -// what you're doing. -// -// The returned io.Reader isn't safe for concurrent use. -func Monotonic(entropy io.Reader, inc uint64) io.Reader { - m := monotonic{ - Reader: bufio.NewReader(entropy), - inc: inc, - } - - if m.inc == 0 { - m.inc = math.MaxUint32 - } - - if rng, ok := entropy.(*rand.Rand); ok { - m.rng = rng - } - - return &m -} - -type monotonic struct { - io.Reader - ms uint64 - inc uint64 - entropy uint80 - rand [8]byte - rng *rand.Rand -} - -func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { - if !m.entropy.IsZero() && m.ms == ms { - err = m.increment() - m.entropy.AppendTo(entropy) - } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { - m.ms = ms - m.entropy.SetBytes(entropy) - } - return err -} - -// increment the previous entropy number with a random number -// of up to m.inc (inclusive). 
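The monotonic reader described above is easiest to see end-to-end; this sketch mirrors the README example earlier in this diff (upstream module, fixed seed for reproducibility):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	t := time.Unix(1000000, 0)
	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)
	ms := ulid.Timestamp(t)

	// Two ULIDs in the same millisecond: the second gets the first's
	// entropy plus a random increment, so ordering is preserved.
	a := ulid.MustNew(ms, entropy)
	b := ulid.MustNew(ms, entropy)
	fmt.Println(a.Compare(b) < 0) // true
}
```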
-func (m *monotonic) increment() error { - if inc, err := m.random(); err != nil { - return err - } else if m.entropy.Add(inc) { - return ErrMonotonicOverflow - } - return nil -} - -// random returns a uniform random value in [1, m.inc), reading entropy -// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1. -// Adapted from: https://golang.org/pkg/crypto/rand/#Int -func (m *monotonic) random() (inc uint64, err error) { - if m.inc <= 1 { - return 1, nil - } - - // Fast path for using a underlying rand.Rand directly. - if m.rng != nil { - // Range: [1, m.inc) - return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil - } - - // bitLen is the maximum bit length needed to encode a value < m.inc. - bitLen := bits.Len64(m.inc) - - // byteLen is the maximum byte length needed to encode a value < m.inc. - byteLen := uint(bitLen+7) / 8 - - // msbitLen is the number of bits in the most significant byte of m.inc-1. - msbitLen := uint(bitLen % 8) - if msbitLen == 0 { - msbitLen = 8 - } - - for inc == 0 || inc >= m.inc { - if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil { - return 0, err - } - - // Clear bits in the first byte to increase the probability - // that the candidate is < m.inc. - m.rand[0] &= uint8(int(1< 0 { low += ":" + bitsetToStr(m.low.cats) } @@ -631,7 +634,7 @@ func (m mlsRange) String() string { return low } - high := "s" + strconv.Itoa(int(m.high.sens)) + high := "s" + strconv.Itoa(m.high.sens) if m.high.cats != nil && m.high.cats.BitLen() > 0 { high += ":" + bitsetToStr(m.high.cats) } @@ -639,15 +642,16 @@ func (m mlsRange) String() string { return low + "-" + high } -// TODO: remove min and max once Go < 1.21 is not supported. -func max(a, b uint) uint { +// TODO: remove these in favor of built-in min/max +// once we stop supporting Go < 1.21. +func maxInt(a, b int) int { if a > b { return a } return b } -func min(a, b uint) uint { +func minInt(a, b int) int { if a < b { return a } @@ -676,10 +680,10 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) { outrange := &mlsRange{low: &level{}, high: &level{}} /* take the greatest of the low */ - outrange.low.sens = max(s.low.sens, t.low.sens) + outrange.low.sens = maxInt(s.low.sens, t.low.sens) /* take the least of the high */ - outrange.high.sens = min(s.high.sens, t.high.sens) + outrange.high.sens = minInt(s.high.sens, t.high.sens) /* find the intersecting categories */ if s.low.cats != nil && t.low.cats != nil { @@ -731,6 +735,9 @@ func setKeyLabel(label string) error { if label == "" && errors.Is(err, os.ErrPermission) { return nil } + if errors.Is(err, unix.EACCES) && unix.Getuid() != unix.Gettid() { + return ErrNotTGLeader + } return err } @@ -809,8 +816,7 @@ func enforceMode() int { // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. func setEnforceMode(mode int) error { - //nolint:gosec // ignore G306: permissions to be 0600 or less. - return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) + return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, @@ -1017,8 +1023,7 @@ func addMcs(processLabel, fileLabel string) (string, string) { // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { - //nolint:gosec // ignore G306: permissions to be 0600 or less. 
- return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) + return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0) } // copyLevel returns a label with the MLS/MCS level from src label replaced on diff --git a/vendor/github.com/ostreedev/ostree-go/LICENSE b/vendor/github.com/ostreedev/ostree-go/LICENSE deleted file mode 100644 index aa93b4da..00000000 --- a/vendor/github.com/ostreedev/ostree-go/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Portions of this code are derived from: - -https://github.com/dradtke/gotk3 - -Copyright (c) 2013 Conformal Systems LLC. - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go deleted file mode 100644 index a4ad0f00..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -/* - * GBoolean - */ - -// GBoolean is a Go representation of glib's gboolean -type GBoolean C.gboolean - -func NewGBoolean() GBoolean { - return GBoolean(0) -} - -func GBool(b bool) GBoolean { - if b { - return GBoolean(1) - } - return GBoolean(0) -} - -func (b GBoolean) Ptr() unsafe.Pointer { - return unsafe.Pointer(&b) -} - -func GoBool(b GBoolean) bool { - if b != 0 { - return true - } - return false -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go deleted file mode 100644 index 537db472..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -import ( - "unsafe" -) - -// GIO types - -type GCancellable struct { - *GObject -} - -func (self *GCancellable) native() *C.GCancellable { - return (*C.GCancellable)(unsafe.Pointer(self)) -} - -func (self *GCancellable) Ptr() unsafe.Pointer { - return unsafe.Pointer(self) -} - -// At the moment, no cancellable API, just pass nil diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go deleted file mode 100644 index 714b15d0..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "errors" - "unsafe" -) - -/* - * GError - */ - -// GError is a representation of GLib's GError -type GError struct { - ptr unsafe.Pointer -} - -func NewGError() GError { - return GError{nil} -} - -func (e GError) Ptr() unsafe.Pointer { - if e.ptr == nil { - return nil - } - return e.ptr -} - -func (e GError) Nil() { - e.ptr = nil -} - -func (e *GError) native() *C.GError { - if e == nil || e.ptr == nil { - return nil - } - return (*C.GError)(e.ptr) -} - -func ToGError(ptr unsafe.Pointer) GError { - return GError{ptr} -} - -func ConvertGError(e GError) error { - defer C.g_error_free(e.native()) - return errors.New(C.GoString((*C.char)(C._g_error_get_message(e.native())))) -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go deleted file mode 100644 index babe7050..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "unsafe" -) - -/* - * GFile - */ - -type GFile struct { - ptr unsafe.Pointer -} - -func (f GFile) Ptr() unsafe.Pointer { - return f.ptr -} - -func NewGFile() *GFile { - return &GFile{nil} -} - -func ToGFile(ptr unsafe.Pointer) *GFile { - gf := NewGFile() - gf.ptr = ptr - return gf -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go deleted file mode 100644 index 9c155834..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "unsafe" -) - -/* - * GFileInfo - */ - -type GFileInfo struct { - ptr unsafe.Pointer -} - -func (fi GFileInfo) Ptr() unsafe.Pointer { - return fi.ptr -} - -func NewGFileInfo() GFileInfo { - var fi GFileInfo = GFileInfo{nil} - return fi -} - -func ToGFileInfo(p unsafe.Pointer) *GFileInfo { - var fi *GFileInfo = &GFileInfo{} - fi.ptr = p - return fi -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go deleted file mode 100644 index 20cc321c..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -/* - * GHashTable - */ -type GHashTable struct { - ptr unsafe.Pointer -} - -func (ht *GHashTable) Ptr() unsafe.Pointer { - return ht.ptr -} - -func (ht *GHashTable) native() *C.GHashTable { - return (*C.GHashTable)(ht.ptr) -} - -func ToGHashTable(ptr unsafe.Pointer) *GHashTable { - return &GHashTable{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go deleted file mode 100644 index 1657edf5..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -/* - * GHashTableIter - */ -type GHashTableIter struct { - ptr unsafe.Pointer -} - -func (ht *GHashTableIter) Ptr() unsafe.Pointer { - return ht.ptr -} - -func (ht *GHashTableIter) native() *C.GHashTableIter { - return (*C.GHashTableIter)(ht.ptr) -} - -func ToGHashTableIter(ptr unsafe.Pointer) *GHashTableIter { - return &GHashTableIter{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go deleted file mode 100644 index f3d3aa52..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h deleted file mode 100644 index a55bd242..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h +++ /dev/null @@ -1,17 +0,0 @@ -#include - -static char * -_g_error_get_message (GError *error) -{ - g_assert (error != NULL); - return error->message; -} - -static const char * -_g_variant_lookup_string (GVariant *v, const char *key) -{ - const char *r; - if (g_variant_lookup (v, key, "&s", &r)) - return r; - return NULL; -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go deleted file mode 100644 index dedbe749..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "unsafe" -) - -/* - * GObject - */ - -// IObject is an interface type implemented by Object and all types which embed -// an Object. It is meant to be used as a type for function arguments which -// require GObjects or any subclasses thereof. -type IObject interface { - toGObject() *C.GObject - ToObject() *GObject -} - -// GObject is a representation of GLib's GObject. -type GObject struct { - ptr unsafe.Pointer -} - -func (v *GObject) Ptr() unsafe.Pointer { - return v.ptr -} - -func (v *GObject) native() *C.GObject { - if v == nil { - return nil - } - return (*C.GObject)(v.ptr) -} - -func (v *GObject) Ref() { - C.g_object_ref(C.gpointer(v.Ptr())) -} - -func (v *GObject) Unref() { - C.g_object_unref(C.gpointer(v.Ptr())) -} - -func (v *GObject) RefSink() { - C.g_object_ref_sink(C.gpointer(v.native())) -} - -func (v *GObject) IsFloating() bool { - c := C.g_object_is_floating(C.gpointer(v.native())) - return GoBool(GBoolean(c)) -} - -func (v *GObject) ForceFloating() { - C.g_object_force_floating(v.native()) -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go deleted file mode 100644 index 05fd54a1..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include <glib.h> -// #include <glib-object.h> -// #include <gio/gio.h> -// #include "glibobject.go.h" -// #include <stdlib.h> -import "C" - -/* - * GOptionContext - */ - -type GOptionContext struct { - ptr unsafe.Pointer -} - -func (oc *GOptionContext) Ptr() unsafe.Pointer { - return oc.ptr -} - -func (oc *GOptionContext) native() *C.GOptionContext { - return (*C.GOptionContext)(oc.ptr) -} - -func ToGOptionContext(ptr unsafe.Pointer) GOptionContext { - return GOptionContext{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go deleted file mode 100644 index 6a6acfd9..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems <info@conformal.com> - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include <glib.h> -// #include <glib-object.h> -// #include <gio/gio.h> -// #include "glibobject.go.h" -// #include <stdlib.h> -import "C" -import ( - "fmt" - "unsafe" -) - -/* - * GVariant - */ - -type GVariant struct { - ptr unsafe.Pointer -} - -//func GVariantNew(p unsafe.Pointer) *GVariant { -//o := &GVariant{p} -//runtime.SetFinalizer(o, (*GVariant).Unref) -//return o; -//} - -//func GVariantNewSink(p unsafe.Pointer) *GVariant { -//o := &GVariant{p} -//runtime.SetFinalizer(o, (*GVariant).Unref) -//o.RefSink() -//return o; -//} - -func (v *GVariant) native() *C.GVariant { - return (*C.GVariant)(v.ptr) -} - -func (v *GVariant) Ptr() unsafe.Pointer { - if v == nil { - return nil - } - return v.ptr -} - -func (v *GVariant) Ref() { - C.g_variant_ref(v.native()) -} - -func (v *GVariant) Unref() { - C.g_variant_unref(v.native()) -} - -func (v *GVariant) RefSink() { - C.g_variant_ref_sink(v.native()) -} - -func (v *GVariant) TypeString() string { - cs := (*C.char)(C.g_variant_get_type_string(v.native())) - return C.GoString(cs) -} - -func (v *GVariant) GetChildValue(i int) *GVariant { - cchild := C.g_variant_get_child_value(v.native(), C.gsize(i)) - return (*GVariant)(unsafe.Pointer(cchild)) -} - -func (v *GVariant) LookupString(key string) (string, error) { - ckey := C.CString(key) - defer C.free(unsafe.Pointer(ckey)) - // TODO: Find a way to have constant C strings in golang - cstr := C._g_variant_lookup_string(v.native(), ckey) - if cstr == nil { - return "", fmt.Errorf("No such key: %s", key) - } - return C.GoString(cstr), nil -} - -func ToGVariant(ptr unsafe.Pointer) *GVariant { - return &GVariant{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go deleted file mode 100644 index 8b0236ff..00000000 ---
a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package otbuiltin contains all of the basic commands for creating and -// interacting with an ostree repository -package otbuiltin - -import ( - "errors" - "fmt" - "runtime" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include <stdlib.h> -// #include <glib.h> -// #include <ostree.h> -// #include "builtin.go.h" -import "C" - -// Repo represents a local ostree repository -type Repo struct { - ptr unsafe.Pointer -} - -// isInitialized checks if the repo has been initialized
func (r *Repo) isInitialized() bool { - if r == nil || r.ptr == nil { - return false - } - return true -} - -// native converts an ostree repo struct to its C equivalent -func (r *Repo) native() *C.OstreeRepo { - if !r.isInitialized() { - return nil - } - return (*C.OstreeRepo)(r.ptr) -} - -// repoFromNative takes a C ostree repo and converts it to a Go struct -func repoFromNative(or *C.OstreeRepo) *Repo { - if or == nil { - return nil - } - r := &Repo{unsafe.Pointer(or)} - return r -} - -// OpenRepo attempts to open the repo at the given path -func OpenRepo(path string) (*Repo, error) { - if path == "" { - return nil, errors.New("empty path") - } - - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - repoPath := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(repoPath)) - crepo := C.ostree_repo_new(repoPath) - repo := repoFromNative(crepo) - - var cerr *C.GError - r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr))) - if !r { - return nil, generateError(cerr) - } - - return repo, nil -} - -// enableTombstoneCommits enables support for tombstone commits. -// -// This makes it possible to distinguish between intentional deletions and -// accidental removals of commits. -func (r *Repo) enableTombstoneCommits() error { - if !r.isInitialized() { - return errors.New("repo not initialized") - } - - config := C.ostree_repo_get_config(r.native()) - groupC := C.CString("core") - defer C.free(unsafe.Pointer(groupC)) - keyC := C.CString("tombstone-commits") - defer C.free(unsafe.Pointer(keyC)) - valueC := C.g_key_file_get_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), nil) - tombstoneCommits := glib.GoBool(glib.GBoolean(valueC)) - - // tombstoneCommits is false either because the key is unset or because it is explicitly set to FALSE in the config file - if !tombstoneCommits { - var cerr *C.GError - C.g_key_file_set_boolean(config, (*C.gchar)(groupC), (*C.gchar)(keyC), C.TRUE) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(r.native(), config, &cerr))) { - return generateError(cerr) - } - } - return nil -} - -// generateError wraps a GLib error into a Go one. -func generateError(err *C.GError) error { - if err == nil { - return errors.New("nil GError") - } - - goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err))) - _, file, line, ok := runtime.Caller(1) - if ok { - return fmt.Errorf("%s:%d - %s", file, line, goErr) - } - return goErr -} - -// isOk wraps a gboolean return value into a bool. -// 0 is false/error, everything else is true/ok.
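-// For example (a sketch, reusing crepo and cerr as in OpenRepo above): -// -// if !isOk(C.ostree_repo_open(crepo, nil, &cerr)) { -// return nil, generateError(cerr) -// }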
-func isOk(v C.gboolean) bool { - return glib.GoBool(glib.GBoolean(v)) -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h deleted file mode 100644 index 76171554..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h +++ /dev/null @@ -1,179 +0,0 @@ -#ifndef BUILTIN_GO_H -#define BUILTIN_GO_H - -#include -#include -#include -#include - -static guint32 owner_uid; -static guint32 owner_gid; - -static void -_ostree_repo_append_modifier_flags(OstreeRepoCommitModifierFlags *flags, int flag) { - *flags |= flag; -} - -struct CommitFilterData { - GHashTable *mode_adds; - GHashTable *skip_list; -}; - -typedef struct CommitFilterData CommitFilterData; - -static char* _gptr_to_str(gpointer p) -{ - return (char*)p; -} - -// The following 3 functions are wrapper functions for macros since CGO can't parse macros -static OstreeRepoFile* -_ostree_repo_file(GFile *file) -{ - return OSTREE_REPO_FILE (file); -} - -static gpointer -_guint_to_pointer (guint u) -{ - return GUINT_TO_POINTER (u); -} - -static const GVariantType* -_g_variant_type (char *type) -{ - return G_VARIANT_TYPE (type); -} - -static int -_at_fdcwd () -{ - return AT_FDCWD; -} - -static guint64 -_guint64_from_be (guint64 val) -{ - return GUINT64_FROM_BE (val); -} - - - -// These functions are wrappers for variadic functions since CGO can't parse variadic functions -static void -_g_printerr_onearg (char* msg, - char* arg) -{ - g_printerr("%s %s\n", msg, arg); -} - -static void -_g_set_error_onearg (GError *err, - char* msg, - char* arg) -{ - g_set_error(&err, G_IO_ERROR, G_IO_ERROR_FAILED, "%s %s", msg, arg); -} - -static void -_g_variant_builder_add_twoargs (GVariantBuilder* builder, - const char *format_string, - char *arg1, - GVariant *arg2) -{ - g_variant_builder_add(builder, format_string, arg1, arg2); -} - -static GHashTable* -_g_hash_table_new_full () -{ - return g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); -} - -static void -_g_variant_get_commit_dump (GVariant *variant, - const char *format, - char **subject, - char **body, - guint64 *timestamp) -{ - return g_variant_get (variant, format, NULL, NULL, NULL, subject, body, timestamp, NULL, NULL); -} - -static guint32 -_binary_or (guint32 a, guint32 b) -{ - return a | b; -} - -static void -_cleanup (OstreeRepo *self, - OstreeRepoCommitModifier *modifier, - GCancellable *cancellable, - GError **out_error) -{ - if (self) - ostree_repo_abort_transaction(self, cancellable, out_error); - if (modifier) - ostree_repo_commit_modifier_unref (modifier); -} - -// The following functions make up a commit_filter function that gets passed into -// another C function (and thus can't be a go function) as well as its helpers -static OstreeRepoCommitFilterResult -_commit_filter (OstreeRepo *self, - const char *path, - GFileInfo *file_info, - gpointer user_data) -{ - struct CommitFilterData *data = user_data; - GHashTable *mode_adds = data->mode_adds; - GHashTable *skip_list = data->skip_list; - gpointer value; - - if (owner_uid >= 0) - g_file_info_set_attribute_uint32 (file_info, "unix::uid", owner_uid); - if (owner_gid >= 0) - g_file_info_set_attribute_uint32 (file_info, "unix::gid", owner_gid); - - if (mode_adds && g_hash_table_lookup_extended (mode_adds, path, NULL, &value)) - { - guint current_mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode"); - guint mode_add = GPOINTER_TO_UINT (value); - g_file_info_set_attribute_uint32 (file_info, "unix::mode", - 
current_mode | mode_add); - g_hash_table_remove (mode_adds, path); - } - - if (skip_list && g_hash_table_contains (skip_list, path)) - { - g_hash_table_remove (skip_list, path); - return OSTREE_REPO_COMMIT_FILTER_SKIP; - } - - return OSTREE_REPO_COMMIT_FILTER_ALLOW; -} - - -static void -_set_owner_uid (guint32 uid) -{ - owner_uid = uid; -} - -static void _set_owner_gid (guint32 gid) -{ - owner_gid = gid; -} - -// Wrapper function for a function that takes a C function as a parameter. -// That translation doesn't work in go -static OstreeRepoCommitModifier* -_ostree_repo_commit_modifier_new_wrapper (OstreeRepoCommitModifierFlags flags, - gpointer user_data, - GDestroyNotify destroy_notify) -{ - return ostree_repo_commit_modifier_new(flags, _commit_filter, user_data, destroy_notify); -} - -#endif diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go deleted file mode 100644 index 299a8828..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go +++ /dev/null @@ -1,111 +0,0 @@ -package otbuiltin - -import ( - "errors" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include <stdlib.h> -// #include <glib.h> -// #include <ostree.h> -// #include "builtin.go.h" -import "C" - -// checkoutOptions defines all of the options for checking commits -// out of an ostree repo -// -// Note: while this is private, fields are public and part of the API. -type checkoutOptions struct { - // UserMode defines whether to checkout a repo in `bare-user` mode - UserMode bool - // Union specifies whether to overwrite existing filesystem entries - Union bool - // AllowNoent defines whether to skip filepaths that do not exist - AllowNoent bool - // DisableCache defines whether to disable internal repository uncompressed object cache - DisableCache bool - // Whiteouts defines whether to process 'whiteout' (docker style) entries - Whiteouts bool - // RequireHardlinks defines whether to fail rather than fall back to full copies if hard linking fails - RequireHardlinks bool - // Subpath specifies a sub-directory to use for checkout - Subpath string - // FromFile specifies an optional file containing many checkouts to process - FromFile string -} - -// NewCheckoutOptions instantiates and returns a checkoutOptions struct with default values set -func NewCheckoutOptions() checkoutOptions { - return checkoutOptions{} -} - -// Checkout checks out commit `commitRef` from a repository at `repoPath`, -// writing it to `destination`. Returns an error if the checkout could not be processed.
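-// A minimal usage sketch (the repo path, destination, and ref below are -// hypothetical placeholders): -// -// opts := NewCheckoutOptions() -// opts.Union = true -// if err := Checkout("/srv/repo", "/var/tmp/rootfs", "exampleos/stable", opts); err != nil { -// // handle the failed checkout -// }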
-func Checkout(repoPath, destination, commitRef string, opts checkoutOptions) error { - var cancellable *glib.GCancellable - - ccommit := C.CString(commitRef) - defer C.free(unsafe.Pointer(ccommit)) - - var gerr = glib.NewGError() - cerr := (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - repoPathc := C.g_file_new_for_path(C.CString(repoPath)) - defer C.g_object_unref(C.gpointer(repoPathc)) - crepo := C.ostree_repo_new(repoPathc) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - // Multiple checkouts to process - if opts.FromFile != "" { - return processManyCheckouts(crepo, destination, cancellable) - } - - // Simple single checkout - var resolvedCommit *C.char - defer C.free(unsafe.Pointer(resolvedCommit)) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) { - return generateError(cerr) - } - - return processOneCheckout(crepo, resolvedCommit, destination, opts, cancellable) -} - -// processOneCheckout processes one checkout from the repo -func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, destination string, opts checkoutOptions, cancellable *glib.GCancellable) error { - cdest := C.CString(destination) - defer C.free(unsafe.Pointer(cdest)) - - var gerr = glib.NewGError() - cerr := (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - // Process options into bitflags - var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions - if opts.UserMode { - repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER - } - if opts.Union { - repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES - } - if opts.RequireHardlinks { - repoCheckoutAtOptions.no_copy_fallback = C.TRUE - } - - // Checkout commit to destination - if !glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr))) { - return generateError(cerr) - } - - return nil -} - -// processManyCheckouts processes many checkouts in a single batch -func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error { - return errors.New("batch checkouts processing: not implemented") -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go deleted file mode 100644 index ccaff7a1..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go +++ /dev/null @@ -1,483 +0,0 @@ -package otbuiltin - -import ( - "bytes" - "errors" - "fmt" - "strings" - "time" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include <stdlib.h> -// #include <glib.h> -// #include <ostree.h> -// #include "builtin.go.h" -import "C" - -// Declare global variable to store commitOptions -var options commitOptions - -// Declare a function prototype for being passed into another function -type handleLineFunc func(string, *glib.GHashTable) error - -// Contains all of the options for committing to an ostree repo.
Initialize -// with NewCommitOptions() -type commitOptions struct { - Subject string // One line subject - Body string // Full description - Parent string // Parent of the commit - Tree []string // 'dir=PATH' or 'tar=TARFILE' or 'ref=COMMIT': overlay the given argument as a tree - AddMetadataString []string // Add a key/value pair to metadata - AddDetachedMetadataString []string // Add a key/value pair to detached metadata - OwnerUID int // Set file ownership to user id - OwnerGID int // Set file ownership to group id - NoXattrs bool // Do not import extended attributes - LinkCheckoutSpeedup bool // Optimize for commits of trees composed of hardlinks in the repository - TarAutoCreateParents bool // When loading tar archives, automatically create parent directories as needed - SkipIfUnchanged bool // If the contents are unchanged from a previous commit, do nothing - StatOverrideFile string // File containing list of modifications to make permissions - SkipListFile string // File containing list of file paths to skip - GenerateSizes bool // Generate size information along with commit metadata - GpgSign []string // GPG Key ID with which to sign the commit (if you have GPGME - GNU Privacy Guard Made Easy) - GpgHomedir string // GPG home directory to use when looking for keyrings (if you have GPGME - GNU Privacy Guard Made Easy) - Timestamp time.Time // Override the timestamp of the commit - Orphan bool // Commit does not belong to a branch - Fsync bool // Specify whether fsync should be used or not. Default to true -} - -// Initializes a commitOptions struct and sets default values -func NewCommitOptions() commitOptions { - var co commitOptions - co.OwnerUID = -1 - co.OwnerGID = -1 - co.Fsync = true - return co -} - -type OstreeRepoTransactionStats struct { - metadata_objects_total int32 - metadata_objects_written int32 - content_objects_total int32 - content_objects_written int32 - content_bytes_written uint64 -} - -func (repo *Repo) PrepareTransaction() (bool, error) { - var cerr *C.GError = nil - var resume C.gboolean - - r := glib.GoBool(glib.GBoolean(C.ostree_repo_prepare_transaction(repo.native(), &resume, nil, &cerr))) - if !r { - return false, generateError(cerr) - } - return glib.GoBool(glib.GBoolean(resume)), nil -} - -func (repo *Repo) CommitTransaction() (*OstreeRepoTransactionStats, error) { - var cerr *C.GError = nil - var stats OstreeRepoTransactionStats = OstreeRepoTransactionStats{} - statsPtr := (*C.OstreeRepoTransactionStats)(unsafe.Pointer(&stats)) - r := glib.GoBool(glib.GBoolean(C.ostree_repo_commit_transaction(repo.native(), statsPtr, nil, &cerr))) - if !r { - return nil, generateError(cerr) - } - return &stats, nil -} - -func (repo *Repo) TransactionSetRef(remote string, ref string, checksum string) { - var cRemote *C.char = nil - var cRef *C.char = nil - var cChecksum *C.char = nil - - if remote != "" { - cRemote = C.CString(remote) - } - if ref != "" { - cRef = C.CString(ref) - } - if checksum != "" { - cChecksum = C.CString(checksum) - } - C.ostree_repo_transaction_set_ref(repo.native(), cRemote, cRef, cChecksum) -} - -func (repo *Repo) AbortTransaction() error { - var cerr *C.GError = nil - r := glib.GoBool(glib.GBoolean(C.ostree_repo_abort_transaction(repo.native(), nil, &cerr))) - if !r { - return generateError(cerr) - } - return nil -} - -func (repo *Repo) RegenerateSummary() error { - var cerr *C.GError = nil - r := glib.GoBool(glib.GBoolean(C.ostree_repo_regenerate_summary(repo.native(), nil, nil, &cerr))) - if !r { - return generateError(cerr) - } - return nil 
-} - -// Commits a directory, specified by commitPath, to an ostree repo as a given branch -func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) { - // TODO(lucab): `options` is global un-synchronized mutable state, get rid of it. - options = opts - - var err error - var modeAdds *glib.GHashTable - var skipList *glib.GHashTable - var objectToCommit *glib.GFile - var skipCommit bool = false - var ccommitChecksum *C.char - defer C.free(unsafe.Pointer(ccommitChecksum)) - var flags C.OstreeRepoCommitModifierFlags = 0 - var filter_data C.CommitFilterData - - var cerr *C.GError - defer C.free(unsafe.Pointer(cerr)) - var metadata *C.GVariant = nil - defer func() { - if metadata != nil { - defer C.g_variant_unref(metadata) - } - }() - - var detachedMetadata *C.GVariant = nil - defer C.free(unsafe.Pointer(detachedMetadata)) - var mtree *C.OstreeMutableTree - defer C.free(unsafe.Pointer(mtree)) - var root *C.GFile - defer C.free(unsafe.Pointer(root)) - var modifier *C.OstreeRepoCommitModifier - defer C.free(unsafe.Pointer(modifier)) - var cancellable *C.GCancellable - defer C.free(unsafe.Pointer(cancellable)) - - cpath := C.CString(commitPath) - defer C.free(unsafe.Pointer(cpath)) - csubject := C.CString(options.Subject) - defer C.free(unsafe.Pointer(csubject)) - cbody := C.CString(options.Body) - defer C.free(unsafe.Pointer(cbody)) - cbranch := C.CString(branch) - defer C.free(unsafe.Pointer(cbranch)) - cparent := C.CString(options.Parent) - defer C.free(unsafe.Pointer(cparent)) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { - goto out - } - - // If the user provided a stat override file - if strings.Compare(options.StatOverrideFile, "") != 0 { - modeAdds = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) - if err = parseFileByLine(options.StatOverrideFile, handleStatOverrideLine, modeAdds, cancellable); err != nil { - goto out - } - } - - // If the user provided a skiplist file - if strings.Compare(options.SkipListFile, "") != 0 { - skipList = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) - if err = parseFileByLine(options.SkipListFile, handleSkipListline, skipList, cancellable); err != nil { - goto out - } - } - - if options.AddMetadataString != nil { - metadata, err = parseKeyValueStrings(options.AddMetadataString) - if err != nil { - goto out - } - } - - if options.AddDetachedMetadataString != nil { - _, err = parseKeyValueStrings(options.AddDetachedMetadataString) - if err != nil { - goto out - } - } - - if strings.Compare(branch, "") == 0 && !options.Orphan { - err = errors.New("A branch must be specified or use commitOptions.Orphan") - goto out - } - - if options.NoXattrs { - C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_SKIP_XATTRS) - } - if options.GenerateSizes { - C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES) - } - if !options.Fsync { - C.ostree_repo_set_disable_fsync(repo.native(), C.TRUE) - } - - if flags != 0 || options.OwnerUID >= 0 || options.OwnerGID >= 0 || strings.Compare(options.StatOverrideFile, "") != 0 || options.NoXattrs { - filter_data.mode_adds = (*C.GHashTable)(modeAdds.Ptr()) - filter_data.skip_list = (*C.GHashTable)(skipList.Ptr()) - C._set_owner_uid((C.guint32)(options.OwnerUID)) - C._set_owner_gid((C.guint32)(options.OwnerGID)) - modifier = C._ostree_repo_commit_modifier_new_wrapper(flags, C.gpointer(&filter_data), nil) - } - - if strings.Compare(options.Parent, "") != 0 { - if 
strings.Compare(options.Parent, "none") == 0 { - options.Parent = "" - } - } else if !options.Orphan { - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.TRUE, &cparent, &cerr))) { - goto out - } - } - - if options.LinkCheckoutSpeedup && !glib.GoBool(glib.GBoolean(C.ostree_repo_scan_hardlinks(repo.native(), cancellable, &cerr))) { - goto out - } - - mtree = C.ostree_mutable_tree_new() - - if len(commitPath) == 0 && (len(options.Tree) == 0 || len(options.Tree[0]) == 0) { - currentDir := (*C.char)(C.g_get_current_dir()) - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(currentDir))) - C.g_free(C.gpointer(currentDir)) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } else if len(options.Tree) != 0 { - var eq int = -1 - cerr = nil - for tree := range options.Tree { - eq = strings.Index(options.Tree[tree], "=") - if eq == -1 { - C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(options.Tree[tree])) - goto out - } - treeType := options.Tree[tree][:eq] - treeVal := options.Tree[tree][eq+1:] - - if strings.Compare(treeType, "dir") == 0 { - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } else if strings.Compare(treeType, "tar") == 0 { - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_archive_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, (C.gboolean)(glib.GBool(opts.TarAutoCreateParents)), cancellable, &cerr))) { - fmt.Println("error 1") - goto out - } - } else if strings.Compare(treeType, "ref") == 0 { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), C.CString(treeVal), (**C.GFile)(objectToCommit.Ptr()), nil, cancellable, &cerr))) { - goto out - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } else { - C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(treeVal)) - goto out - } - } - } else { - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(cpath))) - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } - - if modeAdds != nil && C.g_hash_table_size((*C.GHashTable)(modeAdds.Ptr())) > 0 { - var hashIter *C.GHashTableIter - - var key, value C.gpointer - - C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(modeAdds.Ptr())) - - for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { - C._g_printerr_onearg(C.CString("Unmatched StatOverride path: "), C._gptr_to_str(key)) - } - err = errors.New("Unmatched StatOverride paths") - C.free(unsafe.Pointer(hashIter)) - C.free(unsafe.Pointer(key)) - C.free(unsafe.Pointer(value)) - goto out - } - - if skipList != nil && C.g_hash_table_size((*C.GHashTable)(skipList.Ptr())) > 0 { - var hashIter *C.GHashTableIter - var key, value C.gpointer - - C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(skipList.Ptr())) - - for 
glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { - C._g_printerr_onearg(C.CString("Unmatched SkipList path: "), C._gptr_to_str(key)) - } - err = errors.New("Unmatched SkipList paths") - C.free(unsafe.Pointer(hashIter)) - C.free(unsafe.Pointer(key)) - C.free(unsafe.Pointer(value)) - goto out - } - - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_mtree(repo.native(), mtree, &root, cancellable, &cerr))) { - goto out - } - - if options.SkipIfUnchanged && strings.Compare(options.Parent, "") != 0 { - var parentRoot *C.GFile - - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), cparent, &parentRoot, nil, cancellable, &cerr))) { - C.free(unsafe.Pointer(parentRoot)) - goto out - } - - if glib.GoBool(glib.GBoolean(C.g_file_equal(root, parentRoot))) { - skipCommit = true - } - C.free(unsafe.Pointer(parentRoot)) - } - - if !skipCommit { - var timestamp C.guint64 - - if options.Timestamp.IsZero() { - var now *C.GDateTime = C.g_date_time_new_now_utc() - timestamp = (C.guint64)(C.g_date_time_to_unix(now)) - C.g_date_time_unref(now) - - cerr = nil - ret := C.ostree_repo_write_commit(repo.native(), cparent, csubject, cbody, metadata, C._ostree_repo_file(root), &ccommitChecksum, cancellable, &cerr) - if !glib.GoBool(glib.GBoolean(ret)) { - goto out - } - } else { - timestamp = (C.guint64)(options.Timestamp.Unix()) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_commit_with_time(repo.native(), cparent, csubject, cbody, - metadata, C._ostree_repo_file(root), timestamp, &ccommitChecksum, cancellable, &cerr))) { - goto out - } - } - - if detachedMetadata != nil { - C.ostree_repo_write_commit_detached_metadata(repo.native(), ccommitChecksum, detachedMetadata, cancellable, &cerr) - } - - if len(options.GpgSign) != 0 { - for key := range options.GpgSign { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_sign_commit(repo.native(), (*C.gchar)(ccommitChecksum), (*C.gchar)(C.CString(options.GpgSign[key])), (*C.gchar)(C.CString(options.GpgHomedir)), cancellable, &cerr))) { - goto out - } - } - } - - if strings.Compare(branch, "") != 0 { - C.ostree_repo_transaction_set_ref(repo.native(), nil, cbranch, ccommitChecksum) - } else if !options.Orphan { - goto out - } else { - // TODO: Looks like I forgot to implement this. 
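-// (Presumably nothing further is needed here: an orphan commit -// deliberately sets no ref, so the checksum returned below is its -// only handle.)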
- } else { - ccommitChecksum = C.CString(options.Parent) - } - - return C.GoString(ccommitChecksum), nil -out: - if repo.native() != nil { - C.ostree_repo_abort_transaction(repo.native(), cancellable, nil) - //C.free(unsafe.Pointer(repo.native())) - } - if modifier != nil { - C.ostree_repo_commit_modifier_unref(modifier) - } - if err != nil { - return "", err - } - return "", generateError(cerr) -} - -// Parse an array of key value pairs of the format KEY=VALUE and add them to a GVariant -func parseKeyValueStrings(pairs []string) (*C.GVariant, error) { - builder := C.g_variant_builder_new(C._g_variant_type(C.CString("a{sv}"))) - defer C.g_variant_builder_unref(builder) - - for iter := range pairs { - index := strings.Index(pairs[iter], "=") - if index <= 0 { - var buffer bytes.Buffer - buffer.WriteString("Missing '=' in KEY=VALUE metadata '") - buffer.WriteString(pairs[iter]) - buffer.WriteString("'") - return nil, errors.New(buffer.String()) - } - - key := C.CString(pairs[iter][:index]) - value := C.CString(pairs[iter][index+1:]) - - valueVariant := C.g_variant_new_string((*C.gchar)(value)) - - C._g_variant_builder_add_twoargs(builder, C.CString("{sv}"), key, valueVariant) - } - - metadata := C.g_variant_builder_end(builder) - return C.g_variant_ref_sink(metadata), nil -} - -// Parse a file line by line and handle the line with the handleLineFunc -func parseFileByLine(path string, fn handleLineFunc, table *glib.GHashTable, cancellable *C.GCancellable) error { - var contents *C.char - var file *glib.GFile - var lines []string - var gerr = glib.NewGError() - cerr := (*C.GError)(gerr.Ptr()) - - file = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(path)))) - if !glib.GoBool(glib.GBoolean(C.g_file_load_contents((*C.GFile)(file.Ptr()), cancellable, &contents, nil, nil, &cerr))) { - return generateError(cerr) - } - - lines = strings.Split(C.GoString(contents), "\n") - for line := range lines { - if strings.Compare(lines[line], "") == 0 { - continue - } - - if err := fn(lines[line], table); err != nil { - return generateError(cerr) - } - } - return nil -} - -// Handle an individual line from a StatOverride file -func handleStatOverrideLine(line string, table *glib.GHashTable) error { - var space int - var modeAdd C.guint - - if space = strings.IndexRune(line, ' '); space == -1 { - return errors.New("Malformed StatOverrideFile (no space found)") - } - - modeAdd = (C.guint)(C.g_ascii_strtod((*C.gchar)(C.CString(line)), nil)) - C.g_hash_table_insert((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line[space+1:])))), C._guint_to_pointer(modeAdd)) - - return nil -} - -// Handle an individual line from a SkipList file -func handleSkipListline(line string, table *glib.GHashTable) error { - C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line))))) - - return nil -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go deleted file mode 100644 index 6ee6671b..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go +++ /dev/null @@ -1,84 +0,0 @@ -package otbuiltin - -import ( - "strings" - "unsafe" -) - -// #cgo pkg-config: ostree-1 -// #include <stdlib.h> -// #include <glib.h> -// #include <ostree.h> -// #include "builtin.go.h" -import "C" - -// initOptions contains all of the options for initializing an ostree repo -// -// Note: while this is private, exported fields are public and part of the API.
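-// A usage sketch (hypothetical path; Init is defined below): -// -// opts := NewInitOptions() // Mode defaults to "bare" -// exists, err := Init("/srv/repo", opts) -// if err != nil && !exists { -// // repo could not be initialized -// }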
-type initOptions struct { - // Mode defines repository mode: either bare, archive-z2, or bare-user - Mode string -} - -// NewInitOptions instantiates and returns an initOptions struct with default values set -func NewInitOptions() initOptions { - return initOptions{ - Mode: "bare", - } -} - -// Init initializes a new ostree repository at the given path. Returns true -// if the repo exists at the location, regardless of whether it was initialized -// by the function or if it already existed. Returns an error if the repo could -// not be initialized -func Init(path string, options initOptions) (bool, error) { - repoMode, err := parseRepoMode(options.Mode) - if err != nil { - return false, err - } - - // Create a repo struct from the path - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - pathc := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(pathc)) - repo := C.ostree_repo_new(pathc) - - // If the repo exists in the filesystem, return an error but set exists to true - /* var exists C.gboolean = 0 - success := glib.GoBool(glib.GBoolean(C.ostree_repo_exists(crepo, &exists, &cerr))) - if exists != 0 { - err = errors.New("repository already exists") - return true, err - } else if !success { - return false, generateError(cerr) - }*/ - - var cErr *C.GError - defer C.free(unsafe.Pointer(cErr)) - if r := C.ostree_repo_create(repo, repoMode, nil, &cErr); !isOk(r) { - err := generateError(cErr) - if strings.Contains(err.Error(), "File exists") { - return true, err - } - return false, err - } - return true, nil -} - -// parseRepoMode converts a mode string to a C.OSTREE_REPO_MODE enum value -func parseRepoMode(modeLabel string) (C.OstreeRepoMode, error) { - var cErr *C.GError - defer C.free(unsafe.Pointer(cErr)) - - cModeLabel := C.CString(modeLabel) - defer C.free(unsafe.Pointer(cModeLabel)) - - var retMode C.OstreeRepoMode - if r := C.ostree_repo_mode_from_string(cModeLabel, &retMode, &cErr); !isOk(r) { - // NOTE(lucab): zero-value for this C enum has no special/invalid meaning. 
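-// (For reference, a sketch of the expected mappings as resolved by -// libostree's ostree_repo_mode_from_string: "bare" -> OSTREE_REPO_MODE_BARE, -// "archive-z2" -> OSTREE_REPO_MODE_ARCHIVE_Z2, -// "bare-user" -> OSTREE_REPO_MODE_BARE_USER.)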
- return C.OSTREE_REPO_MODE_BARE, generateError(cErr) - } - - return retMode, nil -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go deleted file mode 100644 index d5749821..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go +++ /dev/null @@ -1,163 +0,0 @@ -package otbuiltin - -import ( - "fmt" - "time" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include -// #include -// #include -// #include "builtin.go.h" -import "C" - -// LogEntry is a struct for the various pieces of data in a log entry -type LogEntry struct { - Checksum []byte - Variant []byte - Timestamp time.Time - Subject string - Body string -} - -// Convert the log entry to a string -func (l LogEntry) String() string { - if len(l.Variant) == 0 { - return fmt.Sprintf("%s\n%s\n\n\t%s\n\n\t%s\n\n", l.Checksum, l.Timestamp, l.Subject, l.Body) - } - return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant) -} - -type ostreeDumpFlags uint - -const ( - ostreeDumpNone ostreeDumpFlags = 0 - ostreeDumpRaw ostreeDumpFlags = 1 << iota -) - -// logOptions contains all of the options for initializing an ostree repo -type logOptions struct { - // Raw determines whether to show raw variant data - Raw bool -} - -// NewLogOptions instantiates and returns a logOptions struct with default values set -func NewLogOptions() logOptions { - return logOptions{} -} - -// Log shows the logs of a branch starting with a given commit or ref. Returns a -// slice of log entries on success and an error otherwise -func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) { - // attempt to open the repository - repo, err := OpenRepo(repoPath) - if err != nil { - return nil, err - } - - cbranch := C.CString(branch) - defer C.free(unsafe.Pointer(cbranch)) - var checksum *C.char - defer C.free(unsafe.Pointer(checksum)) - var cerr *C.GError - defer C.free(unsafe.Pointer(cerr)) - - flags := ostreeDumpNone - if options.Raw { - flags |= ostreeDumpRaw - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) { - return nil, generateError(cerr) - } - - return logCommit(repo, checksum, false, flags) -} - -func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags ostreeDumpFlags) ([]LogEntry, error) { - var variant *C.GVariant - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) { - if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) { - return nil, nil - } - return nil, generateError(cerr) - } - - // Get the parent of this commit - parent := (*C.char)(C.ostree_commit_get_parent(variant)) - defer C.free(unsafe.Pointer(parent)) - - entries := make([]LogEntry, 0, 1) - if parent != nil { - var err error - entries, err = logCommit(repo, parent, true, flags) - if err != nil { - return nil, err - } - } - - nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags) - entries = append(entries, nextLogEntry) - - return entries, nil -} - -func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags ostreeDumpFlags) LogEntry { - csum := []byte(C.GoString(checksum)) - - if (flags & ostreeDumpRaw) != 0 { - return dumpVariant(variant, 
csum) - } - - switch objectType { - case C.OSTREE_OBJECT_TYPE_COMMIT: - return dumpCommit(variant, flags, csum) - default: - return LogEntry{ - Checksum: csum, - } - } -} - -func dumpVariant(variant *C.GVariant, csum []byte) LogEntry { - var logVariant []byte - if C.G_BYTE_ORDER != C.G_BIG_ENDIAN { - byteswappedVariant := C.g_variant_byteswap(variant) - logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE)))) - } else { - logVariant = []byte(C.GoString((*C.char)(C.g_variant_print(variant, C.TRUE)))) - } - - return LogEntry{ - Checksum: csum, - Variant: logVariant, - } -} - -func dumpCommit(variant *C.GVariant, flags ostreeDumpFlags, csum []byte) LogEntry { - var subject *C.char - defer C.free(unsafe.Pointer(subject)) - var body *C.char - defer C.free(unsafe.Pointer(body)) - var timeBigE C.guint64 - - C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timeBigE) - - // Translate to a host-endian epoch and convert to Go timestamp - timeHostE := C._guint64_from_be(timeBigE) - timestamp := time.Unix((int64)(timeHostE), 0) - - return LogEntry{ - Timestamp: timestamp, - Subject: C.GoString(subject), - Body: C.GoString(body), - } -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go deleted file mode 100644 index 532522fc..00000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go +++ /dev/null @@ -1,217 +0,0 @@ -package otbuiltin - -import ( - "bytes" - "errors" - "strconv" - "strings" - "time" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include <stdlib.h> -// #include <glib.h> -// #include <ostree.h> -// #include "builtin.go.h" -import "C" - -// Declare global variable for options -var pruneOpts pruneOptions - -// Contains all of the options for pruning an ostree repo. Use -// NewPruneOptions() to initialize -type pruneOptions struct { - NoPrune bool // Only display unreachable objects; don't delete - RefsOnly bool // Only compute reachability via refs - DeleteCommit string // Specify a commit to delete - KeepYoungerThan time.Time // All commits older than this date will be pruned - Depth int // Only traverse the given depth of parents for each commit (default: -1=infinite) - StaticDeltasOnly int // Change the behavior of KeepYoungerThan and DeleteCommit to prune only the static delta files -} - -// Instantiates and returns a pruneOptions struct with default values set -func NewPruneOptions() pruneOptions { - po := new(pruneOptions) - po.Depth = -1 - return *po -} - -// Search for unreachable objects in the repository given by repoPath.
Removes the -// objects unless pruneOptions.NoPrune is specified -func Prune(repoPath string, options pruneOptions) (string, error) { - pruneOpts = options - // attempt to open the repository - repo, err := OpenRepo(repoPath) - if err != nil { - return "", err - } - - var pruneFlags C.OstreeRepoPruneFlags - var numObjectsTotal int - var numObjectsPruned int - var objSizeTotal uint64 - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - var cancellable *glib.GCancellable - - if !pruneOpts.NoPrune && !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { - return "", generateError(cerr) - } - - cerr = nil - if strings.Compare(pruneOpts.DeleteCommit, "") != 0 { - if pruneOpts.NoPrune { - return "", errors.New("Cannot specify both pruneOptions.DeleteCommit and pruneOptions.NoPrune") - } - - if pruneOpts.StaticDeltasOnly > 0 { - if glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), C.CString(pruneOpts.DeleteCommit), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return "", generateError(cerr) - } - } else if err = deleteCommit(repo, pruneOpts.DeleteCommit, cancellable); err != nil { - return "", err - } - } - - if !pruneOpts.KeepYoungerThan.IsZero() { - if pruneOpts.NoPrune { - return "", errors.New("Cannot specify both pruneOptions.KeepYoungerThan and pruneOptions.NoPrune") - } - - if err = pruneCommitsKeepYoungerThanDate(repo, pruneOpts.KeepYoungerThan, cancellable); err != nil { - return "", err - } - } - - if pruneOpts.RefsOnly { - pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_REFS_ONLY - } - if pruneOpts.NoPrune { - pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_NO_PRUNE - } - - formattedFreedSize := C.GoString((*C.char)(C.g_format_size_full((C.guint64)(objSizeTotal), 0))) - - var buffer bytes.Buffer - - buffer.WriteString("Total objects: ") - buffer.WriteString(strconv.Itoa(numObjectsTotal)) - if numObjectsPruned == 0 { - buffer.WriteString("\nNo unreachable objects") - } else if pruneOpts.NoPrune { - buffer.WriteString("\nWould delete: ") - buffer.WriteString(strconv.Itoa(numObjectsPruned)) - buffer.WriteString(" objects, freeing ") - buffer.WriteString(formattedFreedSize) - } else { - buffer.WriteString("\nDeleted ") - buffer.WriteString(strconv.Itoa(numObjectsPruned)) - buffer.WriteString(" objects, ") - buffer.WriteString(formattedFreedSize) - buffer.WriteString(" freed") - } - - return buffer.String(), nil -} - -// Delete an unreachable commit from the repo -func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancellable) error { - var refs *glib.GHashTable - var hashIter glib.GHashTableIter - var hashkey, hashvalue C.gpointer - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - if glib.GoBool(glib.GBoolean(C.ostree_repo_list_refs(repo.native(), nil, (**C.GHashTable)(refs.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(refs.Ptr())) - for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &hashkey, &hashvalue) != 0 { - var ref string = C.GoString((*C.char)(hashkey)) - var commit string = C.GoString((*C.char)(hashvalue)) - if strings.Compare(commitToDelete, commit) == 0 { - var buffer bytes.Buffer - buffer.WriteString("Commit ") - buffer.WriteString(commitToDelete) - buffer.WriteString(" is referenced by ") - buffer.WriteString(ref) - return errors.New(buffer.String()) - } - } - - if err := 
repo.enableTombstoneCommits(); err != nil { - return err - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, C.CString(commitToDelete), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - return nil -} - -// Prune commits but keep any younger than the given date regardless of whether they -// are reachable -func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *glib.GCancellable) error { - var objects *glib.GHashTable - defer C.free(unsafe.Pointer(objects)) - var hashIter glib.GHashTableIter - var key, value C.gpointer - defer C.free(unsafe.Pointer(key)) - defer C.free(unsafe.Pointer(value)) - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - if err := repo.enableTombstoneCommits(); err != nil { - return err - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_objects(repo.native(), C.OSTREE_REPO_LIST_OBJECTS_ALL, (**C.GHashTable)(objects.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(objects.Ptr())) - for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &key, &value) != 0 { - var serializedKey *glib.GVariant - defer C.free(unsafe.Pointer(serializedKey)) - var checksum *C.char - defer C.free(unsafe.Pointer(checksum)) - var objType C.OstreeObjectType - var commitTimestamp uint64 - var commit *glib.GVariant = nil - - C.ostree_object_name_deserialize((*C.GVariant)(serializedKey.Ptr()), &checksum, &objType) - - if objType != C.OSTREE_OBJECT_TYPE_COMMIT { - continue - } - - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (**C.GVariant)(commit.Ptr()), &cerr))) { - return generateError(cerr) - } - - commitTimestamp = (uint64)(C.ostree_commit_get_timestamp((*C.GVariant)(commit.Ptr()))) - if commitTimestamp < (uint64)(date.Unix()) { - cerr = nil - if pruneOpts.StaticDeltasOnly != 0 { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - } else { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - } - } - } - - return nil -} diff --git a/vendor/github.com/proglottis/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore index 0210b26e..bc485bfc 100644 --- a/vendor/github.com/proglottis/gpgme/.gitignore +++ b/vendor/github.com/proglottis/gpgme/.gitignore @@ -1 +1,3 @@ testdata/gpghome/random_seed +testdata/gpghome/.gpg-v21-migrated +testdata/gpghome/private-keys-v1.d/ diff --git a/vendor/github.com/proglottis/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go deleted file mode 100644 index d1dc610d..00000000 --- a/vendor/github.com/proglottis/gpgme/callbacks.go +++ /dev/null @@ -1,42 +0,0 @@ -package gpgme - -import ( - "sync" -) - -var callbacks struct { - sync.Mutex - m map[uintptr]interface{} - c uintptr -} - -func callbackAdd(v interface{}) uintptr { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m == nil { - callbacks.m = make(map[uintptr]interface{}) - } - callbacks.c++ - ret := callbacks.c - callbacks.m[ret] = v - return ret -} - -func callbackLookup(c uintptr) interface{} { - callbacks.Lock() - defer 
callbacks.Unlock() - ret := callbacks.m[c] - if ret == nil { - panic("callback pointer not found") - } - return ret -} - -func callbackDelete(c uintptr) { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m[c] == nil { - panic("callback pointer not found") - } - delete(callbacks.m, c) -} diff --git a/vendor/github.com/proglottis/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go index eee32c03..0e81c36d 100644 --- a/vendor/github.com/proglottis/gpgme/data.go +++ b/vendor/github.com/proglottis/gpgme/data.go @@ -10,6 +10,7 @@ import ( "io" "os" "runtime" + "runtime/cgo" "unsafe" ) @@ -19,30 +20,32 @@ const ( SeekEnd = C.SEEK_END ) +var dataCallbacks = C.struct_gpgme_data_cbs{ + read: C.gpgme_data_read_cb_t(C.gogpgme_readfunc), + write: C.gpgme_data_write_cb_t(C.gogpgme_writefunc), + seek: C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc), +} + //export gogpgme_readfunc func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - n, err := d.r.Read(d.buf[:size]) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.r.Read(unsafe.Slice((*byte)(buffer), size)) if err != nil && err != io.EOF { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } - C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n)) return C.ssize_t(n) } //export gogpgme_writefunc func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size)) - n, err := d.w.Write(d.buf[:size]) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.w.Write(unsafe.Slice((*byte)(buffer), size)) if err != nil && err != io.EOF { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } @@ -51,9 +54,11 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { //export gogpgme_seekfunc func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t { - d := callbackLookup(uintptr(handle)).(*Data) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) n, err := d.s.Seek(int64(offset), int(whence)) if err != nil { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } @@ -63,12 +68,11 @@ func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) // The Data buffer used to communicate with GPGME type Data struct { dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C - buf []byte - cbs C.struct_gpgme_data_cbs r io.Reader w io.Writer s io.Seeker - cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh) + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh) + err error } func newData() *Data { @@ -86,6 +90,7 @@ func NewData() (*Data, error) { // NewDataFile returns a new file based data buffer func NewDataFile(f *os.File) (*Data, error) { d := newData() + d.r = f return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd()))) } @@ -103,20 +108,22 @@ func NewDataBytes(b []byte) (*Data, error) { func NewDataReader(r io.Reader) (*Data, error) { d := newData() d.r = r - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := r.(io.Seeker); ok { + d.s = s + } + 
d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataWriter returns a new callback based data buffer func NewDataWriter(w io.Writer) (*Data, error) { d := newData() d.w = w - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := w.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataReadWriter returns a new callback based data buffer @@ -124,11 +131,11 @@ func NewDataReadWriter(rw io.ReadWriter) (*Data, error) { d := newData() d.r = rw d.w = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := rw.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataReadWriteSeeker returns a new callback based data buffer @@ -137,12 +144,8 @@ func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) { d.r = rw d.w = rw d.s = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // Close releases any resources associated with the data buffer @@ -151,7 +154,7 @@ func (d *Data) Close() error { return nil } if d.cbc > 0 { - callbackDelete(d.cbc) + d.cbc.Delete() } _, err := C.gpgme_data_release(d.dh) runtime.KeepAlive(d) @@ -160,24 +163,42 @@ func (d *Data) Close() error { } func (d *Data) Write(p []byte) (int, error) { - n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) runtime.KeepAlive(d) - if err != nil { + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: return 0, err - } - if n == 0 { + case len(p) > 0 && n == 0: return 0, io.EOF } return int(n), nil } func (d *Data) Read(p []byte) (int, error) { - n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) runtime.KeepAlive(d) - if err != nil { + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: return 0, err - } - if n == 0 { + case len(p) > 0 && n == 0: return 0, io.EOF } return int(n), nil @@ -186,7 +207,15 @@ func (d *Data) Read(p []byte) (int, error) { func (d *Data) Seek(offset int64, whence int) (int64, error) { n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence)) runtime.KeepAlive(d) - return int64(n), err + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: + return 0, err + } + return int64(n), nil } // Name returns the associated 
filename if any diff --git a/vendor/github.com/proglottis/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c index 00da3ab3..f5b38ce7 100644 --- a/vendor/github.com/proglottis/gpgme/go_gpgme.c +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.c @@ -1,13 +1,5 @@ #include "go_gpgme.h" -gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) { - return gpgme_data_new_from_cbs(dh, cbs, (void *)handle); -} - -void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) { - gpgme_set_passphrase_cb(ctx, cb, (void *)handle); -} - gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { return gpgme_data_seek(dh, offset, whence); } @@ -15,17 +7,17 @@ gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { gpgme_error_t gogpgme_op_assuan_transact_ext( gpgme_ctx_t ctx, char* cmd, - uintptr_t data_h, - uintptr_t inquiry_h, - uintptr_t status_h, + void* data_h, + void* inquiry_h, + void* status_h, gpgme_error_t *operr ){ return gpgme_op_assuan_transact_ext( ctx, cmd, - (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h, - (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h, - (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h, + (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, data_h, + (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, inquiry_h, + (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, status_h, operr ); } diff --git a/vendor/github.com/proglottis/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h index eb3a4ba8..8eb5fddd 100644 --- a/vendor/github.com/proglottis/gpgme/go_gpgme.h +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h @@ -10,11 +10,9 @@ extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd); -extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle); -extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle); extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence); -extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr); +extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, void *data_h, void *inquiry_h , void *status_h, gpgme_error_t *operr); extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen ); extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args); diff --git a/vendor/github.com/proglottis/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go index 8f495ddf..15af69c8 100644 --- a/vendor/github.com/proglottis/gpgme/gpgme.go +++ b/vendor/github.com/proglottis/gpgme/gpgme.go @@ -7,11 +7,13 @@ package gpgme // #include // #include "go_gpgme.h" import "C" + import ( "fmt" "io" "os" "runtime" + "runtime/cgo" "time" "unsafe" ) @@ -27,7 +29,8 @@ type Callback func(uidHint string, prevWasBad bool, f *os.File) error //export gogpgme_passfunc func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd 
C.int) C.gpgme_error_t { - c := callbackLookup(uintptr(hook)).(*Context) + h := *(*cgo.Handle)(hook) + c := h.Value().(*Context) go_uid_hint := C.GoString(uid_hint) f := os.NewFile(uintptr(fd), go_uid_hint) defer f.Close() @@ -233,6 +236,17 @@ func SetEngineInfo(proto Protocol, fileName, homeDir string) error { return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome)) } +func GetDirInfo(what string) string { + cwhat := C.CString(what) + defer C.free(unsafe.Pointer(cwhat)) + + cdir := C.gpgme_get_dirinfo(cwhat) + if cdir == nil { + return "" + } + return C.GoString(cdir) +} + func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { var keys []*Key ctx, err := New() @@ -243,7 +257,7 @@ func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { if err := ctx.KeyListStart(pattern, secretOnly); err != nil { return keys, err } - defer ctx.KeyListEnd() + defer func() { _ = ctx.KeyListEnd() }() for ctx.KeyListNext() { keys = append(keys, ctx.Key) } @@ -268,8 +282,10 @@ func Decrypt(r io.Reader) (*Data, error) { if err != nil { return nil, err } - err = ctx.Decrypt(cipher, plain) - plain.Seek(0, SeekSet) + if err := ctx.Decrypt(cipher, plain); err != nil { + return nil, err + } + _, err = plain.Seek(0, SeekSet) return plain, err } @@ -278,7 +294,7 @@ type Context struct { KeyError error callback Callback - cbc uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C } @@ -295,7 +311,7 @@ func (c *Context) Release() { return } if c.cbc > 0 { - callbackDelete(c.cbc) + c.cbc.Delete() } C.gpgme_release(c.ctx) runtime.KeepAlive(c) @@ -364,15 +380,14 @@ func (c *Context) SetCallback(callback Callback) error { var err error c.callback = callback if c.cbc > 0 { - callbackDelete(c.cbc) + c.cbc.Delete() } if callback != nil { - cbc := callbackAdd(c) - c.cbc = cbc - _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc)) + c.cbc = cgo.NewHandle(c) + _, err = C.gpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), unsafe.Pointer(&c.cbc)) } else { c.cbc = 0 - _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0) + _, err = C.gpgme_set_passphrase_cb(c.ctx, nil, nil) } runtime.KeepAlive(c) return err @@ -564,9 +579,11 @@ func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { return err } -type AssuanDataCallback func(data []byte) error -type AssuanInquireCallback func(name, args string) error -type AssuanStatusCallback func(status, args string) error +type ( + AssuanDataCallback func(data []byte) error + AssuanInquireCallback func(name, args string) error + AssuanStatusCallback func(status, args string) error +) // AssuanSend sends a raw Assuan command to gpg-agent func (c *Context) AssuanSend( @@ -577,17 +594,17 @@ func (c *Context) AssuanSend( ) error { var operr C.gpgme_error_t - dataPtr := callbackAdd(&data) - inquiryPtr := callbackAdd(&inquiry) - statusPtr := callbackAdd(&status) + dataPtr := cgo.NewHandle(&data) + inquiryPtr := cgo.NewHandle(&inquiry) + statusPtr := cgo.NewHandle(&status) cmdCStr := C.CString(cmd) defer C.free(unsafe.Pointer(cmdCStr)) err := C.gogpgme_op_assuan_transact_ext( c.ctx, cmdCStr, - C.uintptr_t(dataPtr), - C.uintptr_t(inquiryPtr), - C.uintptr_t(statusPtr), + unsafe.Pointer(&dataPtr), + unsafe.Pointer(&inquiryPtr), + 
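unsafe.Pointer(&statusPtr), &operr, ) runtime.KeepAlive(c) @@ -600,11 +617,14 @@ func (c *Context) AssuanSend( //export gogpgme_assuan_data_callback func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t { - c := callbackLookup(uintptr(handle)).(*AssuanDataCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanDataCallback) if *c == nil { return 0 } - (*c)(C.GoBytes(data, C.int(datalen))) + if err := (*c)(C.GoBytes(data, C.int(datalen))); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } @@ -612,11 +632,14 @@ func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, da //export gogpgme_assuan_inquiry_callback func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t { name := C.GoString(cName) args := C.GoString(cArgs) - c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanInquireCallback) if *c == nil { return 0 } - (*c)(name, args) + if err := (*c)(name, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } @@ -624,11 +647,14 @@ func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t { status := C.GoString(cStatus) args := C.GoString(cArgs) - c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanStatusCallback) if *c == nil { return 0 } - (*c)(status, args) + if err := (*c)(status, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 }

Besides the cgo.Handle migration, the Assuan callbacks above now propagate a non-nil error from the Go callback back to gpgme as GPG_ERR_USER_1 instead of silently discarding it. A hedged usage sketch of AssuanSend, assuming the upstream signature (cmd string, data AssuanDataCallback, inquiry AssuanInquireCallback, status AssuanStatusCallback); building it requires libgpgme, and running it requires a local gpg-agent:

    package main

    import (
        "fmt"
        "log"

        "github.com/proglottis/gpgme"
    )

    func main() {
        ctx, err := gpgme.New()
        if err != nil {
            log.Fatal(err)
        }
        defer ctx.Release()

        // "GETINFO version" is a harmless introspection command. An error
        // returned from any callback below now surfaces as GPG_ERR_USER_1.
        err = ctx.AssuanSend("GETINFO version",
            func(data []byte) error { // AssuanDataCallback
                fmt.Printf("data: %s", data)
                return nil
            },
            func(name, args string) error { // AssuanInquireCallback
                return nil
            },
            func(status, args string) error { // AssuanStatusCallback
                fmt.Println("status:", status, args)
                return nil
            },
        )
        if err != nil {
            log.Fatal(err)
        }
    }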
diff --git a/vendor/github.com/proglottis/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go index 986aca59..8add8ec8 100644 --- a/vendor/github.com/proglottis/gpgme/unset_agent_info.go +++ b/vendor/github.com/proglottis/gpgme/unset_agent_info.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package gpgme diff --git a/vendor/github.com/sigstore/rekor/COPYRIGHT.txt b/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt similarity index 93% rename from vendor/github.com/sigstore/rekor/COPYRIGHT.txt rename to vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt index 7a01c849..8b16ae01 100644 --- a/vendor/github.com/sigstore/rekor/COPYRIGHT.txt +++ b/vendor/github.com/sigstore/protobuf-specs/COPYRIGHT.txt @@ -1,5 +1,5 @@ -Copyright 2021 The Sigstore Authors. +Copyright 2022 The Sigstore Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/sigstore/protobuf-specs/LICENSE similarity index 100% rename from vendor/github.com/go-openapi/analysis/LICENSE rename to vendor/github.com/sigstore/protobuf-specs/LICENSE diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go new file mode 100644 index 00000000..40725bd7 --- /dev/null +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -0,0 +1,1273 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v5.29.4 +// source: sigstore_common.proto + +package v1 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Only a subset of the secure hash standard algorithms are supported. +// See https://csrc.nist.gov/projects/hash-functions#approved-algorithms for more +// details. +// UNSPECIFIED SHOULD not be used; the primary reason for its inclusion is to force +// any proto JSON serialization to emit the used hash algorithm, as the default +// option is to *omit* the default value of an enum (which is the first +// value, represented by '0'). +type HashAlgorithm int32 + +const ( + HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED HashAlgorithm = 0 + HashAlgorithm_SHA2_256 HashAlgorithm = 1 + HashAlgorithm_SHA2_384 HashAlgorithm = 2 + HashAlgorithm_SHA2_512 HashAlgorithm = 3 + HashAlgorithm_SHA3_256 HashAlgorithm = 4 + HashAlgorithm_SHA3_384 HashAlgorithm = 5 +) + +// Enum value maps for HashAlgorithm. +var ( + HashAlgorithm_name = map[int32]string{ + 0: "HASH_ALGORITHM_UNSPECIFIED", + 1: "SHA2_256", + 2: "SHA2_384", + 3: "SHA2_512", + 4: "SHA3_256", + 5: "SHA3_384", + } + HashAlgorithm_value = map[string]int32{ + "HASH_ALGORITHM_UNSPECIFIED": 0, + "SHA2_256": 1, + "SHA2_384": 2, + "SHA2_512": 3, + "SHA3_256": 4, + "SHA3_384": 5, + } +) + +func (x HashAlgorithm) Enum() *HashAlgorithm { + p := new(HashAlgorithm) + *p = x + return p +} + +func (x HashAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HashAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[0].Descriptor() +} + +func (HashAlgorithm) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[0] +} + +func (x HashAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HashAlgorithm.Descriptor instead. +func (HashAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +// Details of a specific public key, capturing the key encoding method +// and signature algorithm. +// +// PublicKeyDetails captures the public key/hash algorithm combinations +// recommended in the Sigstore ecosystem. +// +// This is modelled as a linear set as we want to provide a small number of +// opinionated options instead of allowing every possible permutation. +// +// Any changes to this enum MUST be reflected in the algorithm registry.
+// See: docs/algorithm-registry.md +// +// To avoid the possibility of contradicting formats such as PKCS1 with +// ED25519, the valid permutations are listed as a linear set instead of a +// cartesian set (i.e. one combined variable instead of two, one for encoding +// and one for the signature algorithm). +type PublicKeyDetails int32 + +const ( + PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED PublicKeyDetails = 0 + // RSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKCS1_RSA_PSS PublicKeyDetails = 2 // See RFC8017 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_RSA_PSS PublicKeyDetails = 4 + // RSA public key in PKIX format, PKCS#1v1.5 signature + PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 PublicKeyDetails = 9 + PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 PublicKeyDetails = 10 + PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 PublicKeyDetails = 11 + // RSA public key in PKIX format, RSASSA-PSS signature + PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256 PublicKeyDetails = 16 // See RFC4055 + PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256 PublicKeyDetails = 17 + PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256 PublicKeyDetails = 18 + // ECDSA + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P256_HMAC_SHA_256 PublicKeyDetails = 6 // See RFC6979 + PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 PublicKeyDetails = 5 // See NIST FIPS 186-4 + PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 PublicKeyDetails = 12 + PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 PublicKeyDetails = 13 + // Ed 25519 + PublicKeyDetails_PKIX_ED25519 PublicKeyDetails = 7 // See RFC8032 + PublicKeyDetails_PKIX_ED25519_PH PublicKeyDetails = 8 + // These algorithms are deprecated and should not be used, but they + // were/are being used by most Sigstore client implementations. + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P384_SHA_256 PublicKeyDetails = 19 + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_PKIX_ECDSA_P521_SHA_256 PublicKeyDetails = 20 + // LMS and LM-OTS + // + // These keys and signatures may be used by private Sigstore + // deployments, but are not currently supported by the public + // good instance. + // + // USER WARNING: LMS and LM-OTS are both stateful signature schemes. + // Using them correctly requires discretion and careful consideration + // to ensure that individual secret keys are not used more than once. + // In addition, LM-OTS is a single-use scheme, meaning that it + // MUST NOT be used for more than one signature per LM-OTS key. + // If you cannot maintain these invariants, you MUST NOT use these + // schemes. + PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14 + PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15 +) + +// Enum value maps for PublicKeyDetails.
+var ( + PublicKeyDetails_name = map[int32]string{ + 0: "PUBLIC_KEY_DETAILS_UNSPECIFIED", + 1: "PKCS1_RSA_PKCS1V5", + 2: "PKCS1_RSA_PSS", + 3: "PKIX_RSA_PKCS1V5", + 4: "PKIX_RSA_PSS", + 9: "PKIX_RSA_PKCS1V15_2048_SHA256", + 10: "PKIX_RSA_PKCS1V15_3072_SHA256", + 11: "PKIX_RSA_PKCS1V15_4096_SHA256", + 16: "PKIX_RSA_PSS_2048_SHA256", + 17: "PKIX_RSA_PSS_3072_SHA256", + 18: "PKIX_RSA_PSS_4096_SHA256", + 6: "PKIX_ECDSA_P256_HMAC_SHA_256", + 5: "PKIX_ECDSA_P256_SHA_256", + 12: "PKIX_ECDSA_P384_SHA_384", + 13: "PKIX_ECDSA_P521_SHA_512", + 7: "PKIX_ED25519", + 8: "PKIX_ED25519_PH", + 19: "PKIX_ECDSA_P384_SHA_256", + 20: "PKIX_ECDSA_P521_SHA_256", + 14: "LMS_SHA256", + 15: "LMOTS_SHA256", + } + PublicKeyDetails_value = map[string]int32{ + "PUBLIC_KEY_DETAILS_UNSPECIFIED": 0, + "PKCS1_RSA_PKCS1V5": 1, + "PKCS1_RSA_PSS": 2, + "PKIX_RSA_PKCS1V5": 3, + "PKIX_RSA_PSS": 4, + "PKIX_RSA_PKCS1V15_2048_SHA256": 9, + "PKIX_RSA_PKCS1V15_3072_SHA256": 10, + "PKIX_RSA_PKCS1V15_4096_SHA256": 11, + "PKIX_RSA_PSS_2048_SHA256": 16, + "PKIX_RSA_PSS_3072_SHA256": 17, + "PKIX_RSA_PSS_4096_SHA256": 18, + "PKIX_ECDSA_P256_HMAC_SHA_256": 6, + "PKIX_ECDSA_P256_SHA_256": 5, + "PKIX_ECDSA_P384_SHA_384": 12, + "PKIX_ECDSA_P521_SHA_512": 13, + "PKIX_ED25519": 7, + "PKIX_ED25519_PH": 8, + "PKIX_ECDSA_P384_SHA_256": 19, + "PKIX_ECDSA_P521_SHA_256": 20, + "LMS_SHA256": 14, + "LMOTS_SHA256": 15, + } +) + +func (x PublicKeyDetails) Enum() *PublicKeyDetails { + p := new(PublicKeyDetails) + *p = x + return p +} + +func (x PublicKeyDetails) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PublicKeyDetails) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[1].Descriptor() +} + +func (PublicKeyDetails) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[1] +} + +func (x PublicKeyDetails) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PublicKeyDetails.Descriptor instead. +func (PublicKeyDetails) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +type SubjectAlternativeNameType int32 + +const ( + SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED SubjectAlternativeNameType = 0 + SubjectAlternativeNameType_EMAIL SubjectAlternativeNameType = 1 + SubjectAlternativeNameType_URI SubjectAlternativeNameType = 2 + // OID 1.3.6.1.4.1.57264.1.7 + // See https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md#1361415726417--othername-san + // for more details. + SubjectAlternativeNameType_OTHER_NAME SubjectAlternativeNameType = 3 +) + +// Enum value maps for SubjectAlternativeNameType. 
+var ( + SubjectAlternativeNameType_name = map[int32]string{ + 0: "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED", + 1: "EMAIL", + 2: "URI", + 3: "OTHER_NAME", + } + SubjectAlternativeNameType_value = map[string]int32{ + "SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED": 0, + "EMAIL": 1, + "URI": 2, + "OTHER_NAME": 3, + } +) + +func (x SubjectAlternativeNameType) Enum() *SubjectAlternativeNameType { + p := new(SubjectAlternativeNameType) + *p = x + return p +} + +func (x SubjectAlternativeNameType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubjectAlternativeNameType) Descriptor() protoreflect.EnumDescriptor { + return file_sigstore_common_proto_enumTypes[2].Descriptor() +} + +func (SubjectAlternativeNameType) Type() protoreflect.EnumType { + return &file_sigstore_common_proto_enumTypes[2] +} + +func (x SubjectAlternativeNameType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubjectAlternativeNameType.Descriptor instead. +func (SubjectAlternativeNameType) EnumDescriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +// HashOutput captures a digest of a 'message' (generic octet sequence) +// and the corresponding hash algorithm used. +type HashOutput struct { + state protoimpl.MessageState `protogen:"open.v1"` + Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` + // This is the raw octets of the message digest as computed by + // the hash algorithm. + Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HashOutput) Reset() { + *x = HashOutput{} + mi := &file_sigstore_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HashOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashOutput) ProtoMessage() {} + +func (x *HashOutput) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashOutput.ProtoReflect.Descriptor instead. +func (*HashOutput) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{0} +} + +func (x *HashOutput) GetAlgorithm() HashAlgorithm { + if x != nil { + return x.Algorithm + } + return HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED +} + +func (x *HashOutput) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +// MessageSignature stores the computed signature over a message. +type MessageSignature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Message digest can be used to identify the artifact. + // Clients MUST NOT attempt to use this digest to verify the associated + // signature; it is intended solely for identification. + MessageDigest *HashOutput `protobuf:"bytes,1,opt,name=message_digest,json=messageDigest,proto3" json:"message_digest,omitempty"` + // The raw bytes as returned from the signature algorithm. + // The signature algorithm (and so the format of the signature bytes) + // is determined by the contents of the 'verification_material', + // either a key-pair or a certificate. If using a certificate, the + // certificate contains the required information on the signature + // algorithm. + // When using a key pair, the algorithm MUST be part of the public + // key, which MUST be communicated out-of-band. + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageSignature) Reset() { + *x = MessageSignature{} + mi := &file_sigstore_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageSignature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageSignature) ProtoMessage() {} + +func (x *MessageSignature) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageSignature.ProtoReflect.Descriptor instead. +func (*MessageSignature) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageSignature) GetMessageDigest() *HashOutput { + if x != nil { + return x.MessageDigest + } + return nil +} + +func (x *MessageSignature) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +// LogId captures the identity of a transparency log. +type LogId struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The unique identity of the log, represented by its public key. + KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LogId) Reset() { + *x = LogId{} + mi := &file_sigstore_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LogId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogId) ProtoMessage() {} + +func (x *LogId) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogId.ProtoReflect.Descriptor instead. +func (*LogId) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{2} +} + +func (x *LogId) GetKeyId() []byte { + if x != nil { + return x.KeyId + } + return nil +} + +// This message holds an RFC 3161 timestamp. +type RFC3161SignedTimestamp struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Signed timestamp is the DER-encoded TimeStampResponse.
+ // See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 + SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RFC3161SignedTimestamp) Reset() { + *x = RFC3161SignedTimestamp{} + mi := &file_sigstore_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RFC3161SignedTimestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RFC3161SignedTimestamp) ProtoMessage() {} + +func (x *RFC3161SignedTimestamp) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RFC3161SignedTimestamp.ProtoReflect.Descriptor instead. +func (*RFC3161SignedTimestamp) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{3} +} + +func (x *RFC3161SignedTimestamp) GetSignedTimestamp() []byte { + if x != nil { + return x.SignedTimestamp + } + return nil +} + +type PublicKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded public key, encoding method is specified by the + // key_details attribute. + RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3,oneof" json:"raw_bytes,omitempty"` + // Key encoding and signature algorithm to use for this key. + KeyDetails PublicKeyDetails `protobuf:"varint,2,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"` + // Optional validity period for this key, *inclusive* of the endpoints. + ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + mi := &file_sigstore_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. +func (*PublicKey) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{4} +} + +func (x *PublicKey) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +func (x *PublicKey) GetKeyDetails() PublicKeyDetails { + if x != nil { + return x.KeyDetails + } + return PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED +} + +func (x *PublicKey) GetValidFor() *TimeRange { + if x != nil { + return x.ValidFor + } + return nil +} + +// PublicKeyIdentifier can be used to identify an (out of band) delivered +// key, to verify a signature. +type PublicKeyIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Optional unauthenticated hint on which key to use. 
+ // The format of the hint must be agreed upon out of band by the + // signer and the verifiers, and so is not subject to this + // specification. + // Example use-case is to specify the public key to use, from a + // trusted key-ring. + // Implementors are RECOMMENDED to derive the value from the public + // key as described in RFC 6962. + // See: https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2 + Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKeyIdentifier) Reset() { + *x = PublicKeyIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKeyIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKeyIdentifier) ProtoMessage() {} + +func (x *PublicKeyIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKeyIdentifier.ProtoReflect.Descriptor instead. +func (*PublicKeyIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{5} +} + +func (x *PublicKeyIdentifier) GetHint() string { + if x != nil { + return x.Hint + } + return "" +} + +// An ASN.1 OBJECT IDENTIFIER +type ObjectIdentifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifier) Reset() { + *x = ObjectIdentifier{} + mi := &file_sigstore_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifier) ProtoMessage() {} + +func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifier.ProtoReflect.Descriptor instead. +func (*ObjectIdentifier) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{6} +} + +func (x *ObjectIdentifier) GetId() []int32 { + if x != nil { + return x.Id + } + return nil +}
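The PublicKey and PublicKeyIdentifier messages above compose with the PublicKeyDetails enum defined earlier. A short sketch of building both, assuming only this generated package and the Go standard library; encoding the RFC 6962-style key id as base64 for the hint is one common convention, not something the message itself mandates:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "crypto/sha256"
        "crypto/x509"
        "encoding/base64"
        "fmt"

        v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
    )

    func main() {
        // Throwaway key, for illustration only.
        priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }
        der, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
        if err != nil {
            panic(err)
        }

        pk := &v1.PublicKey{
            RawBytes:   der, // DER-encoded key, per the field comment
            KeyDetails: v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
        }

        // RFC 6962-style key id: SHA-256 over the DER-encoded public key.
        sum := sha256.Sum256(der)
        id := &v1.PublicKeyIdentifier{Hint: base64.StdEncoding.EncodeToString(sum[:])}

        fmt.Println(pk.GetKeyDetails(), id.GetHint())
    }

+ +// An OID and the corresponding (byte) value.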
+type ObjectIdentifierValuePair struct { + state protoimpl.MessageState `protogen:"open.v1"` + Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObjectIdentifierValuePair) Reset() { + *x = ObjectIdentifierValuePair{} + mi := &file_sigstore_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObjectIdentifierValuePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectIdentifierValuePair) ProtoMessage() {} + +func (x *ObjectIdentifierValuePair) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectIdentifierValuePair.ProtoReflect.Descriptor instead. +func (*ObjectIdentifierValuePair) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{7} +} + +func (x *ObjectIdentifierValuePair) GetOid() *ObjectIdentifier { + if x != nil { + return x.Oid + } + return nil +} + +func (x *ObjectIdentifierValuePair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type DistinguishedName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` + CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DistinguishedName) Reset() { + *x = DistinguishedName{} + mi := &file_sigstore_common_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DistinguishedName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistinguishedName) ProtoMessage() {} + +func (x *DistinguishedName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistinguishedName.ProtoReflect.Descriptor instead. +func (*DistinguishedName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{8} +} + +func (x *DistinguishedName) GetOrganization() string { + if x != nil { + return x.Organization + } + return "" +} + +func (x *DistinguishedName) GetCommonName() string { + if x != nil { + return x.CommonName + } + return "" +} + +type X509Certificate struct { + state protoimpl.MessageState `protogen:"open.v1"` + // DER-encoded X.509 certificate. 
+ RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509Certificate) Reset() { + *x = X509Certificate{} + mi := &file_sigstore_common_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509Certificate) ProtoMessage() {} + +func (x *X509Certificate) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509Certificate.ProtoReflect.Descriptor instead. +func (*X509Certificate) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{9} +} + +func (x *X509Certificate) GetRawBytes() []byte { + if x != nil { + return x.RawBytes + } + return nil +} + +type SubjectAlternativeName struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` + // Types that are valid to be assigned to Identity: + // + // *SubjectAlternativeName_Regexp + // *SubjectAlternativeName_Value + Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SubjectAlternativeName) Reset() { + *x = SubjectAlternativeName{} + mi := &file_sigstore_common_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SubjectAlternativeName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternativeName) ProtoMessage() {} + +func (x *SubjectAlternativeName) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternativeName.ProtoReflect.Descriptor instead. +func (*SubjectAlternativeName) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{10} +} + +func (x *SubjectAlternativeName) GetType() SubjectAlternativeNameType { + if x != nil { + return x.Type + } + return SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED +} + +func (x *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (x *SubjectAlternativeName) GetRegexp() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Regexp); ok { + return x.Regexp + } + } + return "" +} + +func (x *SubjectAlternativeName) GetValue() string { + if x != nil { + if x, ok := x.Identity.(*SubjectAlternativeName_Value); ok { + return x.Value + } + } + return "" +} + +type isSubjectAlternativeName_Identity interface { + isSubjectAlternativeName_Identity() +} + +type SubjectAlternativeName_Regexp struct { + // A regular expression describing the expected value for + // the SAN. 
+ Regexp string `protobuf:"bytes,2,opt,name=regexp,proto3,oneof"` +} + +type SubjectAlternativeName_Value struct { + // The exact value to match against. + Value string `protobuf:"bytes,3,opt,name=value,proto3,oneof"` +} + +func (*SubjectAlternativeName_Regexp) isSubjectAlternativeName_Identity() {} + +func (*SubjectAlternativeName_Value) isSubjectAlternativeName_Identity() {} + +// A collection of X.509 certificates. +// +// This "chain" can be used in multiple contexts, such as providing a root CA +// certificate within a TUF root of trust or multiple untrusted certificates for +// the purpose of chain building. +type X509CertificateChain struct { + state protoimpl.MessageState `protogen:"open.v1"` + // One or more DER-encoded certificates. + // + // In some contexts (such as `VerificationMaterial.x509_certificate_chain`), this sequence + // has an imposed order. Unless explicitly specified, there is otherwise no + // guaranteed order. + Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *X509CertificateChain) Reset() { + *x = X509CertificateChain{} + mi := &file_sigstore_common_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *X509CertificateChain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509CertificateChain) ProtoMessage() {} + +func (x *X509CertificateChain) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509CertificateChain.ProtoReflect.Descriptor instead. +func (*X509CertificateChain) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{11} +} + +func (x *X509CertificateChain) GetCertificates() []*X509Certificate { + if x != nil { + return x.Certificates + } + return nil +} + +// The time range is closed and includes both the start and end times +// (i.e., [start, end]). +// End is optional to be able to capture a period that has started but +// has no known end. +type TimeRange struct { + state protoimpl.MessageState `protogen:"open.v1"` + Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimeRange) Reset() { + *x = TimeRange{} + mi := &file_sigstore_common_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimeRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeRange) ProtoMessage() {} + +func (x *TimeRange) ProtoReflect() protoreflect.Message { + mi := &file_sigstore_common_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +}
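Because End is optional, a containment check against TimeRange has to treat a missing end as a still-open validity window. A small sketch against the generated type; the within helper is illustrative, not part of the generated code (note that AsTime on a nil Start would yield the Unix epoch):

    package main

    import (
        "fmt"
        "time"

        v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    // within reports whether t lies in the closed interval [start, end];
    // a nil End means the period has started but has no known end.
    func within(r *v1.TimeRange, t time.Time) bool {
        if t.Before(r.GetStart().AsTime()) {
            return false
        }
        if r.GetEnd() == nil {
            return true
        }
        return !t.After(r.GetEnd().AsTime())
    }

    func main() {
        r := &v1.TimeRange{
            Start: timestamppb.New(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)),
            // End left unset: the window is still open.
        }
        fmt.Println(within(r, time.Now())) // true
    }

+ +// Deprecated: Use TimeRange.ProtoReflect.Descriptor instead.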
+func (*TimeRange) Descriptor() ([]byte, []int) { + return file_sigstore_common_proto_rawDescGZIP(), []int{12} +} + +func (x *TimeRange) GetStart() *timestamppb.Timestamp { + if x != nil { + return x.Start + } + return nil +} + +func (x *TimeRange) GetEnd() *timestamppb.Timestamp { + if x != nil { + return x.End + } + return nil +} + +var File_sigstore_common_proto protoreflect.FileDescriptor + +var file_sigstore_common_proto_rawDesc = string([]byte{ + 0x0a, 0x15, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x69, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, + 0x43, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x80, 0x01, 0x0a, + 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x65, 0x76, 0x2e, + 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0d, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0x23, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x6b, + 0x65, 0x79, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x16, 0x52, 0x46, 0x43, 0x33, 0x31, 0x36, 0x31, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, + 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xd9, + 0x01, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x09, + 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 
0x73, 0x88, 0x01, 0x01, 0x12, 0x49, + 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0a, 0x6b, + 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x09, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x01, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x22, 0x29, 0x0a, 0x13, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x68, 0x69, 0x6e, 0x74, 0x22, 0x27, 0x0a, 0x10, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, + 0x0a, 0x19, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x3a, 0x0a, 0x03, 0x6f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, + 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x58, 0x0a, + 0x11, 0x44, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x33, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x61, + 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, + 0x16, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 
0x2e, 0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x18, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x06, 0x72, 0x65, 0x67, 0x65, 0x78, 0x70, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x63, 0x0a, + 0x14, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x64, 0x65, + 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x73, 0x22, 0x78, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x30, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x31, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x65, 0x6e, 0x64, 0x2a, 0x75, 0x0a, 0x0d, + 0x48, 0x61, 0x73, 0x68, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1e, 0x0a, + 0x1a, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, + 0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, + 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38, + 0x34, 0x10, 0x05, 0x2a, 0xe9, 0x04, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, + 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11, + 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, + 0x35, 0x10, 0x01, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x15, 0x0a, 0x0d, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x02, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x18, + 0x0a, 0x10, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x35, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, + 0x5f, 0x52, 
0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x21, + 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x56, 0x31, 0x35, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x09, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x0a, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x56, 0x31, 0x35, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x10, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x11, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x52, 0x53, 0x41, 0x5f, + 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x12, 0x12, 0x24, 0x0a, 0x1c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x32, 0x35, 0x36, 0x5f, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x06, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, + 0x35, 0x36, 0x10, 0x05, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x33, 0x38, 0x34, 0x10, + 0x0c, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x0d, 0x12, 0x10, + 0x0a, 0x0c, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x07, + 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x5f, 0x50, 0x48, 0x10, 0x08, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, + 0x44, 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, 0x36, + 0x10, 0x13, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, + 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x4d, 0x4f, 0x54, 0x53, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x22, 0x04, 0x08, 0x15, 0x10, 0x32, 0x2a, + 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, + 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, + 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, + 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 
0x48, 0x45, 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, + 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, + 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sigstore_common_proto_rawDescOnce sync.Once + file_sigstore_common_proto_rawDescData []byte +) + +func file_sigstore_common_proto_rawDescGZIP() []byte { + file_sigstore_common_proto_rawDescOnce.Do(func() { + file_sigstore_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sigstore_common_proto_rawDesc), len(file_sigstore_common_proto_rawDesc))) + }) + return file_sigstore_common_proto_rawDescData +} + +var file_sigstore_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_sigstore_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_sigstore_common_proto_goTypes = []any{ + (HashAlgorithm)(0), // 0: dev.sigstore.common.v1.HashAlgorithm + (PublicKeyDetails)(0), // 1: dev.sigstore.common.v1.PublicKeyDetails + (SubjectAlternativeNameType)(0), // 2: dev.sigstore.common.v1.SubjectAlternativeNameType + (*HashOutput)(nil), // 3: dev.sigstore.common.v1.HashOutput + (*MessageSignature)(nil), // 4: dev.sigstore.common.v1.MessageSignature + (*LogId)(nil), // 5: dev.sigstore.common.v1.LogId + (*RFC3161SignedTimestamp)(nil), // 6: dev.sigstore.common.v1.RFC3161SignedTimestamp + (*PublicKey)(nil), // 7: dev.sigstore.common.v1.PublicKey + (*PublicKeyIdentifier)(nil), // 8: dev.sigstore.common.v1.PublicKeyIdentifier + (*ObjectIdentifier)(nil), // 9: dev.sigstore.common.v1.ObjectIdentifier + (*ObjectIdentifierValuePair)(nil), // 10: dev.sigstore.common.v1.ObjectIdentifierValuePair + (*DistinguishedName)(nil), // 11: dev.sigstore.common.v1.DistinguishedName + (*X509Certificate)(nil), // 12: dev.sigstore.common.v1.X509Certificate + (*SubjectAlternativeName)(nil), // 13: dev.sigstore.common.v1.SubjectAlternativeName + (*X509CertificateChain)(nil), // 14: dev.sigstore.common.v1.X509CertificateChain + (*TimeRange)(nil), // 15: dev.sigstore.common.v1.TimeRange + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp +} +var file_sigstore_common_proto_depIdxs = []int32{ + 0, // 0: dev.sigstore.common.v1.HashOutput.algorithm:type_name -> dev.sigstore.common.v1.HashAlgorithm + 3, // 1: dev.sigstore.common.v1.MessageSignature.message_digest:type_name -> dev.sigstore.common.v1.HashOutput + 1, // 2: dev.sigstore.common.v1.PublicKey.key_details:type_name -> dev.sigstore.common.v1.PublicKeyDetails + 15, // 3: dev.sigstore.common.v1.PublicKey.valid_for:type_name -> dev.sigstore.common.v1.TimeRange + 9, // 4: dev.sigstore.common.v1.ObjectIdentifierValuePair.oid:type_name -> dev.sigstore.common.v1.ObjectIdentifier + 2, // 5: dev.sigstore.common.v1.SubjectAlternativeName.type:type_name -> dev.sigstore.common.v1.SubjectAlternativeNameType + 12, // 6: 
dev.sigstore.common.v1.X509CertificateChain.certificates:type_name -> dev.sigstore.common.v1.X509Certificate + 16, // 7: dev.sigstore.common.v1.TimeRange.start:type_name -> google.protobuf.Timestamp + 16, // 8: dev.sigstore.common.v1.TimeRange.end:type_name -> google.protobuf.Timestamp + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_sigstore_common_proto_init() } +func file_sigstore_common_proto_init() { + if File_sigstore_common_proto != nil { + return + } + file_sigstore_common_proto_msgTypes[4].OneofWrappers = []any{} + file_sigstore_common_proto_msgTypes[10].OneofWrappers = []any{ + (*SubjectAlternativeName_Regexp)(nil), + (*SubjectAlternativeName_Value)(nil), + } + file_sigstore_common_proto_msgTypes[12].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sigstore_common_proto_rawDesc), len(file_sigstore_common_proto_rawDesc)), + NumEnums: 3, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sigstore_common_proto_goTypes, + DependencyIndexes: file_sigstore_common_proto_depIdxs, + EnumInfos: file_sigstore_common_proto_enumTypes, + MessageInfos: file_sigstore_common_proto_msgTypes, + }.Build() + File_sigstore_common_proto = out.File + file_sigstore_common_proto_goTypes = nil + file_sigstore_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md deleted file mode 100644 index 0113fcad..00000000 --- a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md +++ /dev/null @@ -1,116 +0,0 @@ -# Contributing - -When contributing to this repository, please first discuss the change you wish -to make via an [issue](https://github.com/sigstore/rekor/issues). - -## Pull Request Process - -1. Create an [issue](https://github.com/sigstore/rekor/issues) - outlining the fix or feature. -2. Fork the rekor repository to your own GitHub account and clone it locally. -3. Hack on your changes. -4. Update the README.md with details of changes to any interface; this includes new environment - variables, exposed ports, useful file locations, CLI parameters and - new or changed configuration values. -5. Correctly format your commit message; see [Commit Message Guidelines](#commit-message-guidelines) - below. -6. Ensure that CI passes; if it fails, fix the failures. -7. Every pull request requires a review from the [core rekor team](https://github.com/orgs/sigstore/teams/core-team) - before merging. -8. If your pull request consists of more than one commit, please squash your - commits as described in [Squash Commits](#squash-commits). - -## Commit Message Guidelines - -We follow the commit formatting recommendations found in [Chris Beams' How to Write a Git Commit Message article](https://chris.beams.io/posts/git-commit/). - -Well-formed commit messages not only help reviewers understand the nature of -the Pull Request, but also assist the release process, where commit messages -are used to generate release notes. - -A good example of a commit message would be as follows: - -``` -Summarize changes in around 50 characters or less - -More detailed explanatory text, if necessary. Wrap it to about 72 -characters or so.
In some contexts, the first line is treated as the -subject of the commit and the rest of the text as the body. The -blank line separating the summary from the body is critical (unless -you omit the body entirely); various tools like `log`, `shortlog` -and `rebase` can get confused if you run the two together. - -Explain the problem that this commit is solving. Focus on why you -are making this change as opposed to how (the code explains that). -Are there side effects or other unintuitive consequences of this -change? Here's the place to explain them. - -Further paragraphs come after blank lines. - - - Bullet points are okay, too - - - Typically a hyphen or asterisk is used for the bullet, preceded - by a single space, with blank lines in between, but conventions - vary here - -If you use an issue tracker, put references to them at the bottom, -like this: - -Resolves: #123 -See also: #456, #789 -``` - -Note the `Resolves #123` tag; this references the issue raised and allows us to -ensure issues are associated and closed when a pull request is merged. - -Please refer to [the GitHub help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) -for a complete list of issue references. - -## Squash Commits - -Should your pull request consist of more than one commit (perhaps due to -a change being requested during the review cycle), please perform a git squash -once a reviewer has approved your pull request. - -A squash can be performed as follows. Let's say you have the following commits: - - initial commit - second commit - final commit - -Run the command below with the number set to the total commits you wish to -squash (in our case 3 commits): - - git rebase -i HEAD~3 - -Your default text editor will then open up and you will see the following: - - pick eb36612 initial commit - pick 9ac8968 second commit - pick a760569 final commit - - # Rebase eb1429f..a760569 onto eb1429f (3 commands) - -We want to rebase on top of our first commit, so we change the other two commits -to `squash`: - - pick eb36612 initial commit - squash 9ac8968 second commit - squash a760569 final commit - -After this, should you wish to update your commit message to better summarise -your pull request as a whole, run: - - git commit --amend - -You will then need to force push (assuming your initial commit(s) were posted -to GitHub): - - git push origin your-branch --force - -Alternatively, a core member can squash your commits within GitHub. - -## Code of Conduct - -Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct. -Please take a moment to read the [CODE_OF_CONDUCT.md](https://github.com/sigstore/rekor/blob/master/CODE_OF_CONDUCT.md) document. - diff --git a/vendor/github.com/sigstore/rekor/LICENSE b/vendor/github.com/sigstore/rekor/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/sigstore/rekor/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License.
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
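The hunks that follow delete Rekor's go-swagger generated entry models (alpine, cose, dsse, and so on). They all share one polymorphic envelope pattern: MarshalJSON concatenates the typed fields with a fixed "kind" discriminator, and UnmarshalJSON decodes the payload twice, once for the typed fields and once for the bare kind, rejecting a mismatch with a 422 error. A minimal round-trip sketch of that pattern, assuming the go-openapi runtime and the pre-removal models package; the spec contents are illustrative placeholders, not a complete Rekor entry:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	apiVersion := "0.0.1" // satisfies the semver Pattern on apiVersion

	entry := models.Alpine{
		APIVersion: &apiVersion,
		// AlpineSchema is an empty interface, so any JSON-shaped value
		// satisfies it; a real entry would carry package/publicKey data.
		Spec: map[string]any{"package": map[string]any{}},
	}

	// MarshalJSON emits the typed fields plus the discriminator, so the
	// output object includes "kind":"alpine" without it being a struct field.
	raw, err := json.Marshal(entry)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	// UnmarshalJSON decodes the payload twice (typed fields, then the bare
	// "kind") and returns a 422 error when the kind is not "alpine".
	var back models.Alpine
	if err := back.UnmarshalJSON(raw); err != nil {
		panic(err)
	}

	// Validate enforces the Required and Pattern annotations against the
	// default go-openapi format registry.
	if err := back.Validate(strfmt.Default); err != nil {
		panic(err)
	}
}
```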
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go deleted file mode 100644 index 5607679f..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Alpine Alpine package -// -// swagger:model alpine -type Alpine struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Alpine) Kind() string { - return "alpine" -} - -// SetKind sets the kind of this subtype -func (m *Alpine) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Alpine) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Alpine - - if base.Kind != result.Kind() { - /* Not the type we're looking for.
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Alpine) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this alpine -func (m *Alpine) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Alpine) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Alpine) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this alpine based on the context it is used -func (m *Alpine) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Alpine) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Alpine) UnmarshalBinary(b []byte) error { - var res Alpine - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go deleted file mode 100644 index edd25408..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// AlpineSchema Alpine Package Schema -// -// # Schema for Alpine package objects -// -// swagger:model alpineSchema -type AlpineSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go deleted file mode 100644 index 250a6125..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go +++ /dev/null @@ -1,455 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// AlpineV001Schema Alpine v0.0.1 Schema -// -// # Schema for Alpine Package entries -// -// swagger:model alpineV001Schema -type AlpineV001Schema struct { - - // package - // Required: true - Package *AlpineV001SchemaPackage `json:"package"` - - // public key - // Required: true - PublicKey *AlpineV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this alpine v001 schema -func (m *AlpineV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePackage(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *AlpineV001Schema) validatePackage(formats strfmt.Registry) error { - - if err := validate.Required("package", "body", m.Package); err != nil { - return err - } - - if m.Package != nil { - if err := m.Package.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *AlpineV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this alpine v001 schema based on the context it is used -func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePackage(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { - - if m.Package != nil { - - if err := m.Package.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001Schema) UnmarshalBinary(b []byte) error { - var res AlpineV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPackage Information about the package associated with the entry -// -// swagger:model AlpineV001SchemaPackage -type AlpineV001SchemaPackage struct { - - // Specifies the package inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *AlpineV001SchemaPackageHash `json:"hash,omitempty"` - - // Values of the .PKGINFO key / value pairs - // Read Only: true - Pkginfo map[string]string `json:"pkginfo,omitempty"` -} - -// Validate validates this alpine v001 schema package -func (m *AlpineV001SchemaPackage) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *AlpineV001SchemaPackage) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this alpine v001 schema package based on the context it is used -func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePkginfo(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *AlpineV001SchemaPackage) contextValidatePkginfo(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPackage) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPackage) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPackage - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPackageHash Specifies the hash algorithm and value for the package -// -// swagger:model AlpineV001SchemaPackageHash -type AlpineV001SchemaPackageHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the package - // Required: true - Value *string `json:"value"` -} - -// Validate validates this alpine v001 schema package hash -func (m *AlpineV001SchemaPackageHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var alpineV001SchemaPackageHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - alpineV001SchemaPackageHashTypeAlgorithmPropEnum = append(alpineV001SchemaPackageHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // AlpineV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" - AlpineV001SchemaPackageHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *AlpineV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, alpineV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *AlpineV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *AlpineV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this alpine v001 schema package hash based on the context it is used -func (m *AlpineV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPackageHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPackageHash) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPackageHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPublicKey The public key that can verify the package signature -// -// swagger:model AlpineV001SchemaPublicKey -type AlpineV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this alpine v001 schema public key -func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *AlpineV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this alpine v001 schema public key based on context it is used -func (m *AlpineV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go deleted file mode 100644 index 804ddd11..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// ConsistencyProof consistency proof -// -// swagger:model ConsistencyProof -type ConsistencyProof struct { - - // hashes - // Required: true - Hashes []string `json:"hashes"` - - // The hash value stored at the root of the merkle tree at the time the proof was generated - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` -} - -// Validate validates this consistency proof -func (m *ConsistencyProof) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHashes(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *ConsistencyProof) validateHashes(formats strfmt.Registry) error { - - if err := validate.Required("hashes", "body", m.Hashes); err != nil { - return err - } - - for i := 0; i < len(m.Hashes); i++ { - - if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - } - - return nil -} - -func (m *ConsistencyProof) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this consistency proof based on context it is used -func (m *ConsistencyProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *ConsistencyProof) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *ConsistencyProof) UnmarshalBinary(b []byte) error { - var res ConsistencyProof - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go deleted file mode 100644 index 8de4083b..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Cose COSE object -// -// swagger:model cose -type Cose struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Cose) Kind() string { - return "cose" -} - -// SetKind sets the kind of this subtype -func (m *Cose) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Cose) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Cose - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Cose) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this cose -func (m *Cose) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *Cose) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Cose) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this cose based on the context it is used -func (m *Cose) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Cose) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Cose) UnmarshalBinary(b []byte) error { - var res Cose - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go deleted file mode 100644 index e653f220..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CoseSchema COSE Schema -// -// # COSE for Rekord objects -// -// swagger:model coseSchema -type CoseSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go deleted file mode 100644 index 93dce871..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ /dev/null @@ -1,521 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// CoseV001Schema cose v0.0.1 Schema -// -// # Schema for cose object -// -// swagger:model coseV001Schema -type CoseV001Schema struct { - - // data - Data *CoseV001SchemaData `json:"data,omitempty"` - - // The COSE Sign1 Message - // Format: byte - Message strfmt.Base64 `json:"message,omitempty"` - - // The public key that can verify the signature - // Required: true - // Format: byte - PublicKey *strfmt.Base64 `json:"publicKey"` -} - -// Validate validates this cose v001 schema -func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { - if swag.IsZero(m.Data) { // not required - return nil - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *CoseV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema based on the context it is used -func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if swag.IsZero(m.Data) { // not required - return nil - } - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001Schema) UnmarshalBinary(b []byte) error { - var res CoseV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaData Information about the content associated with the entry -// -// swagger:model CoseV001SchemaData -type CoseV001SchemaData struct { - - // Specifies the additional authenticated data required to verify the signature - // Format: byte - Aad strfmt.Base64 `json:"aad,omitempty"` - - // envelope hash - EnvelopeHash *CoseV001SchemaDataEnvelopeHash `json:"envelopeHash,omitempty"` - - // payload hash - PayloadHash *CoseV001SchemaDataPayloadHash `json:"payloadHash,omitempty"` -} - -// Validate validates this cose v001 schema data -func (m *CoseV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelopeHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *CoseV001SchemaData) validateEnvelopeHash(formats strfmt.Registry) error { - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if m.EnvelopeHash != nil { - if err := m.EnvelopeHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "envelopeHash") - } - return err - } - } - - return nil -} - -func (m *CoseV001SchemaData) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this cose v001 schema data based on the context it is used -func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { - - if m.EnvelopeHash != nil { - - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "envelopeHash") - } - return err - } - } - - return nil -} - -func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaData) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaDataEnvelopeHash Specifies the hash algorithm and value for the COSE envelope -// -// swagger:model CoseV001SchemaDataEnvelopeHash -type CoseV001SchemaDataEnvelopeHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this cose v001 schema data envelope hash -func (m *CoseV001SchemaDataEnvelopeHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum = append(coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // CoseV001SchemaDataEnvelopeHashAlgorithmSha256 captures enum value "sha256" - CoseV001SchemaDataEnvelopeHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *CoseV001SchemaDataEnvelopeHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"envelopeHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema data envelope hash based on the context it is used -func (m *CoseV001SchemaDataEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaDataEnvelopeHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaDataEnvelopeHash) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaDataEnvelopeHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaDataPayloadHash Specifies the hash algorithm and value for the content -// -// swagger:model CoseV001SchemaDataPayloadHash -type CoseV001SchemaDataPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the content - // Required: true - Value *string `json:"value"` -} - -// Validate validates this cose v001 schema data payload hash -func (m *CoseV001SchemaDataPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum = append(coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // CoseV001SchemaDataPayloadHashAlgorithmSha256 captures enum value "sha256" - CoseV001SchemaDataPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *CoseV001SchemaDataPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *CoseV001SchemaDataPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *CoseV001SchemaDataPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema data payload hash based on the context it is used -func (m *CoseV001SchemaDataPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaDataPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaDataPayloadHash) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaDataPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go deleted file mode 100644 index dde56205..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// DSSE DSSE envelope -// -// swagger:model dsse -type DSSE struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *DSSE) Kind() string { - return "dsse" -} - -// SetKind sets the kind of this subtype -func (m *DSSE) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *DSSE) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result DSSE - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m DSSE) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil - -// Validate validates this dsse -func (m *DSSE) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *DSSE) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this dsse based on the context it is used -func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSE) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSE) UnmarshalBinary(b []byte) error { - var res DSSE - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go deleted file mode 100644 index 77956264..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// DSSESchema DSSE Schema -// -// log entry schema for dsse envelopes -// -// swagger:model dsseSchema -type DSSESchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go deleted file mode 100644 index ec4c32bf..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go +++ /dev/null @@ -1,685 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// DSSEV001Schema DSSE v0.0.1 Schema -// -// # Schema for DSSE envelopes -// -// swagger:model dsseV001Schema -type DSSEV001Schema struct { - - // envelope hash - EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"` - - // payload hash - PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"` - - // proposed content - ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"` - - // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings - // Read Only: true - // Min Items: 1 - Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"` -} - -// Validate validates this dsse v001 schema -func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelopeHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateProposedContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignatures(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error { - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if m.EnvelopeHash != nil { - if err := m.EnvelopeHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("envelopeHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("payloadHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error { - if swag.IsZero(m.ProposedContent) { // not required - return nil - } - - if m.ProposedContent != nil { - if err := m.ProposedContent.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("proposedContent") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("proposedContent") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error { - if swag.IsZero(m.Signatures) { // not required - return nil - } - - iSignaturesSize := int64(len(m.Signatures)) - - if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil { - return err - } - - for i := 0; i < len(m.Signatures); i++ { - if swag.IsZero(m.Signatures[i]) { // not required - continue - } - - if m.Signatures[i] != nil { - if err := m.Signatures[i].Validate(formats); err != 
nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signatures" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// ContextValidate validate this dsse v001 schema based on the context it is used -func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateProposedContent(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignatures(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { - - if m.EnvelopeHash != nil { - - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("envelopeHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("payloadHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error { - - if m.ProposedContent != nil { - - if swag.IsZero(m.ProposedContent) { // not required - return nil - } - - if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("proposedContent") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("proposedContent") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "signatures", "body", []*DSSEV001SchemaSignaturesItems0(m.Signatures)); err != nil { - return err - } - - for i := 0; i < len(m.Signatures); i++ { - - if m.Signatures[i] != nil { - - if swag.IsZero(m.Signatures[i]) { // not required - return nil - } - - if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signatures" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error { - var res DSSEV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor -// -// swagger:model DSSEV001SchemaEnvelopeHash -type DSSEV001SchemaEnvelopeHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The value of the computed digest over the entire envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this DSSE v001 schema envelope hash -func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256" - DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this DSSE v001 schema envelope hash based on the context it is used -func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaEnvelopeHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope -// -// swagger:model DSSEV001SchemaPayloadHash -type DSSEV001SchemaPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The value of the computed digest over the payload within the envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this DSSE v001 schema payload hash -func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256" - DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this DSSE v001 schema payload hash based on the context it is used -func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaProposedContent DSSE v001 schema proposed content -// -// swagger:model DSSEV001SchemaProposedContent -type DSSEV001SchemaProposedContent struct { - - // DSSE envelope specified as a stringified JSON object - // Required: true - Envelope *string `json:"envelope"` - - // collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings - // Required: true - // Min Items: 1 - Verifiers []strfmt.Base64 `json:"verifiers"` -} - -// Validate validates this DSSE v001 schema proposed content -func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelope(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerifiers(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error { - - if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error { - - if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil { - return err - } - - iVerifiersSize := int64(len(m.Verifiers)) - - if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this DSSE v001 schema proposed content based on context it is used -func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaProposedContent - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature -// -// swagger:model DSSEV001SchemaSignaturesItems0 -type DSSEV001SchemaSignaturesItems0 struct { - - // base64 encoded signature of the payload - // Required: true - // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$ - Signature *string `json:"signature"` - - // verification material that was used to verify the corresponding signature, specified as a base64 encoded string - // Required: true - // Format: byte - Verifier *strfmt.Base64 `json:"verifier"` -} - -// Validate validates this DSSE v001 schema signatures items0 -func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error { - var res []error - - if err := 
m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerifier(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error { - - if err := validate.Required("verifier", "body", m.Verifier); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this DSSE v001 schema signatures items0 based on context it is used -func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaSignaturesItems0 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go deleted file mode 100644 index ac14f202..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// Error error -// -// swagger:model Error -type Error struct { - - // code - Code int64 `json:"code,omitempty"` - - // message - Message string `json:"message,omitempty"` -} - -// Validate validates this error -func (m *Error) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this error based on context it is used -func (m *Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *Error) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Error) UnmarshalBinary(b []byte) error { - var res Error - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go deleted file mode 100644 index b3e1f8a3..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Hashedrekord Hashed Rekord object -// -// swagger:model hashedrekord -type Hashedrekord struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Hashedrekord) Kind() string { - return "hashedrekord" -} - -// SetKind sets the kind of this subtype -func (m *Hashedrekord) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Hashedrekord) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Hashedrekord - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Hashedrekord) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this hashedrekord -func (m *Hashedrekord) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *Hashedrekord) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Hashedrekord) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this hashedrekord based on the context it is used -func (m *Hashedrekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Hashedrekord) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Hashedrekord) UnmarshalBinary(b []byte) error { - var res Hashedrekord - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go deleted file mode 100644 index 387a9392..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// HashedrekordSchema Rekor Schema -// -// # Schema for Rekord objects -// -// swagger:model hashedrekordSchema -type HashedrekordSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go deleted file mode 100644 index 3b906ae2..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ /dev/null @@ -1,519 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// HashedrekordV001Schema Hashed Rekor v0.0.1 Schema -// -// # Schema for Hashed Rekord object -// -// swagger:model hashedrekordV001Schema -type HashedrekordV001Schema struct { - - // data - // Required: true - Data *HashedrekordV001SchemaData `json:"data"` - - // signature - // Required: true - Signature *HashedrekordV001SchemaSignature `json:"signature"` -} - -// Validate validates this hashedrekord v001 schema -func (m *HashedrekordV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *HashedrekordV001Schema) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema based on the context it is used -func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001Schema) UnmarshalBinary(b []byte) error { - var res HashedrekordV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaData Information about the content associated with the entry -// -// swagger:model HashedrekordV001SchemaData -type HashedrekordV001SchemaData struct { - - // hash - Hash *HashedrekordV001SchemaDataHash `json:"hash,omitempty"` -} - -// Validate validates this hashedrekord v001 schema data -func (m *HashedrekordV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaData) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema data based on the context it is used -func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaData) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaDataHash Specifies the hash algorithm and value for the content -// -// swagger:model HashedrekordV001SchemaDataHash -type HashedrekordV001SchemaDataHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256 sha384 sha512] - Algorithm *string `json:"algorithm"` - - // The hash value for the content, as represented by a lower case hexadecimal string - // Required: true - Value *string `json:"value"` -} - -// Validate validates this hashedrekord v001 schema data hash -func (m *HashedrekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256","sha384","sha512"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum = append(hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" - HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" - - // HashedrekordV001SchemaDataHashAlgorithmSha384 captures enum value "sha384" - HashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" - - // HashedrekordV001SchemaDataHashAlgorithmSha512 captures enum value "sha512" - HashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" -) - -// prop value enum -func (m *HashedrekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *HashedrekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *HashedrekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this hashedrekord v001 schema data hash based on context it is used -func (m *HashedrekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface 
implementation -func (m *HashedrekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaDataHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaSignature Information about the detached signature associated with the entry -// -// swagger:model HashedrekordV001SchemaSignature -type HashedrekordV001SchemaSignature struct { - - // Specifies the content of the signature inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // public key - PublicKey *HashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` -} - -// Validate validates this hashedrekord v001 schema signature -func (m *HashedrekordV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema signature based on the context it is used -func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." 
+ "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information -// -// swagger:model HashedrekordV001SchemaSignaturePublicKey -type HashedrekordV001SchemaSignaturePublicKey struct { - - // Specifies the content of the public key or code signing certificate inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` -} - -// Validate validates this hashedrekord v001 schema signature public key -func (m *HashedrekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this hashedrekord v001 schema signature public key based on context it is used -func (m *HashedrekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go deleted file mode 100644 index d19b8bc8..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Helm Helm chart -// -// swagger:model helm -type Helm struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Helm) Kind() string { - return "helm" -} - -// SetKind sets the kind of this subtype -func (m *Helm) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Helm) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Helm - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Helm) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this helm -func (m *Helm) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *Helm) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Helm) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this helm based on the context it is used -func (m *Helm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Helm) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Helm) UnmarshalBinary(b []byte) error { - var res Helm - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go deleted file mode 100644 index 0ab87df9..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// HelmSchema Helm Schema -// -// # Schema for Helm objects -// -// swagger:model helmSchema -type HelmSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go deleted file mode 100644 index 930efc87..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go +++ /dev/null @@ -1,662 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// HelmV001Schema Helm v0.0.1 Schema -// -// # Schema for Helm object -// -// swagger:model helmV001Schema -type HelmV001Schema struct { - - // chart - // Required: true - Chart *HelmV001SchemaChart `json:"chart"` - - // public key - // Required: true - PublicKey *HelmV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this helm v001 schema -func (m *HelmV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateChart(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001Schema) validateChart(formats strfmt.Registry) error { - - if err := validate.Required("chart", "body", m.Chart); err != nil { - return err - } - - if m.Chart != nil { - if err := m.Chart.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart") - } - return err - } - } - - return nil -} - -func (m *HelmV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this helm v001 schema based on the context it is used -func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateChart(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error { - - if m.Chart != nil { - - if err := m.Chart.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart") - } - return err - } - } - - return nil -} - -func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001Schema) UnmarshalBinary(b []byte) error { - var res HelmV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChart Information about the Helm chart associated with the entry -// -// swagger:model HelmV001SchemaChart -type HelmV001SchemaChart struct { - - // hash - Hash *HelmV001SchemaChartHash `json:"hash,omitempty"` - - // provenance - // Required: true - Provenance *HelmV001SchemaChartProvenance `json:"provenance"` -} - -// Validate validates this helm v001 schema chart -func (m *HelmV001SchemaChart) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateProvenance(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaChart) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *HelmV001SchemaChart) validateProvenance(formats strfmt.Registry) error { - - if err := validate.Required("chart"+"."+"provenance", "body", m.Provenance); err != nil { - return err - } - - if m.Provenance != nil { - if err := m.Provenance.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance") - } - return err - } - } - - return nil -} - -// ContextValidate validate this helm v001 schema chart based on the context it is used -func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateProvenance(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error { - - if m.Provenance != nil { - - if err := m.Provenance.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaChart) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaChart) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaChart - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChartHash Specifies the hash algorithm and value for the chart -// -// swagger:model HelmV001SchemaChartHash -type HelmV001SchemaChartHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the chart - // Required: true - Value *string `json:"value"` -} - -// Validate validates this helm v001 schema chart hash -func (m *HelmV001SchemaChartHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var helmV001SchemaChartHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - helmV001SchemaChartHashTypeAlgorithmPropEnum = append(helmV001SchemaChartHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // HelmV001SchemaChartHashAlgorithmSha256 captures enum value "sha256" - HelmV001SchemaChartHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *HelmV001SchemaChartHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, helmV001SchemaChartHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *HelmV001SchemaChartHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("chart"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("chart"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *HelmV001SchemaChartHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("chart"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this helm v001 schema chart hash based on the context it is used -func (m *HelmV001SchemaChartHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaChartHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaChartHash) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaChartHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChartProvenance The provenance entry associated with the signed Helm Chart -// -// swagger:model HelmV001SchemaChartProvenance -type HelmV001SchemaChartProvenance struct { - - // Specifies the content of the provenance file inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // signature - Signature *HelmV001SchemaChartProvenanceSignature `json:"signature,omitempty"` -} - -// Validate validates this helm v001 schema chart provenance -func (m *HelmV001SchemaChartProvenance) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaChartProvenance) validateSignature(formats strfmt.Registry) error { - if swag.IsZero(m.Signature) { // not required - return nil - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance" + "." + "signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance" + "." 
+ "signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this helm v001 schema chart provenance based on the context it is used -func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if swag.IsZero(m.Signature) { // not required - return nil - } - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance" + "." + "signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance" + "." + "signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaChartProvenance) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaChartProvenance) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaChartProvenance - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChartProvenanceSignature Information about the included signature in the provenance file -// -// swagger:model HelmV001SchemaChartProvenanceSignature -type HelmV001SchemaChartProvenanceSignature struct { - - // Specifies the signature embedded within the provenance file - // Required: true - // Read Only: true - // Format: byte - Content strfmt.Base64 `json:"content"` -} - -// Validate validates this helm v001 schema chart provenance signature -func (m *HelmV001SchemaChartProvenanceSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaChartProvenanceSignature) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this helm v001 schema chart provenance signature based on the context it is used -func (m *HelmV001SchemaChartProvenanceSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HelmV001SchemaChartProvenanceSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaChartProvenanceSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaChartProvenanceSignature) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaChartProvenanceSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaPublicKey The public key that can verify the package signature -// -// swagger:model HelmV001SchemaPublicKey -type HelmV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this helm v001 schema public key -func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this helm v001 schema public key based on context it is used -func (m *HelmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go deleted file mode 100644 index c555eb2d..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// InactiveShardLogInfo inactive shard log info -// -// swagger:model InactiveShardLogInfo -type InactiveShardLogInfo struct { - - // The current hash value stored at the root of the merkle tree - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` - - // The current signed tree head - // Required: true - SignedTreeHead *string `json:"signedTreeHead"` - - // The current treeID - // Required: true - // Pattern: ^[0-9]+$ - TreeID *string `json:"treeID"` - - // The current number of nodes in the merkle tree - // Required: true - // Minimum: 1 - TreeSize *int64 `json:"treeSize"` -} - -// Validate validates this inactive shard log info -func (m *InactiveShardLogInfo) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignedTreeHead(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeID(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeSize(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *InactiveShardLogInfo) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *InactiveShardLogInfo) validateSignedTreeHead(formats strfmt.Registry) error { - - if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { - return err - } - - return nil -} - -func (m *InactiveShardLogInfo) validateTreeID(formats strfmt.Registry) error { - - if err := validate.Required("treeID", "body", m.TreeID); err != nil { - return err - } - - if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { - return err - } - - return nil -} - -func (m *InactiveShardLogInfo) validateTreeSize(formats strfmt.Registry) error { - - if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { - return err - } - - if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this inactive shard log info based on context it is used -func (m *InactiveShardLogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *InactiveShardLogInfo) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *InactiveShardLogInfo) UnmarshalBinary(b []byte) error { - var res InactiveShardLogInfo - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go deleted file mode 100644 index 86f0d7b9..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by 
go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// InclusionProof inclusion proof -// -// swagger:model InclusionProof -type InclusionProof struct { - - // The checkpoint (signed tree head) that the inclusion proof is based on - // Required: true - Checkpoint *string `json:"checkpoint"` - - // A list of hashes required to compute the inclusion proof, sorted in order from leaf to root - // Required: true - Hashes []string `json:"hashes"` - - // The index of the entry in the transparency log - // Required: true - // Minimum: 0 - LogIndex *int64 `json:"logIndex"` - - // The hash value stored at the root of the merkle tree at the time the proof was generated - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` - - // The size of the merkle tree at the time the inclusion proof was generated - // Required: true - // Minimum: 1 - TreeSize *int64 `json:"treeSize"` -} - -// Validate validates this inclusion proof -func (m *InclusionProof) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateCheckpoint(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHashes(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogIndex(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeSize(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *InclusionProof) validateCheckpoint(formats strfmt.Registry) error { - - if err := validate.Required("checkpoint", "body", m.Checkpoint); err != nil { - return err - } - - return nil -} - -func (m *InclusionProof) validateHashes(formats strfmt.Registry) error { - - if err := validate.Required("hashes", "body", m.Hashes); err != nil { - return err - } - - for i := 0; i < len(m.Hashes); i++ { - - if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - } - - return nil -} - -func (m *InclusionProof) validateLogIndex(formats strfmt.Registry) error { - - if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { - return err - } - - if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { - return err - } - - return nil -} - -func (m *InclusionProof) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *InclusionProof) validateTreeSize(formats strfmt.Registry) error { - - if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { - return err - } - - if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this inclusion proof based on context it is used -func (m *InclusionProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *InclusionProof) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *InclusionProof) UnmarshalBinary(b []byte) error { - var res InclusionProof - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go deleted file mode 100644 index 4f208de1..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Intoto Intoto object -// -// swagger:model intoto -type Intoto struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec IntotoSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Intoto) Kind() string { - return "intoto" -} - -// SetKind sets the kind of this subtype -func (m *Intoto) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Intoto) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec IntotoSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Intoto - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Intoto) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec IntotoSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this intoto -func (m *Intoto) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Intoto) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Intoto) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this intoto based on the context it is used -func (m *Intoto) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Intoto) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Intoto) UnmarshalBinary(b []byte) error { - var res Intoto - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go deleted file mode 100644 index a7fdaa6a..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IntotoSchema Intoto Schema -// -// # Intoto for Rekord objects -// -// swagger:model intotoSchema -type IntotoSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go deleted file mode 100644 index 0c299b1c..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go +++ /dev/null @@ -1,514 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// IntotoV001Schema intoto v0.0.1 Schema -// -// # Schema for intoto object -// -// swagger:model intotoV001Schema -type IntotoV001Schema struct { - - // content - // Required: true - Content *IntotoV001SchemaContent `json:"content"` - - // The public key that can verify the signature - // Required: true - // Format: byte - PublicKey *strfmt.Base64 `json:"publicKey"` -} - -// Validate validates this intoto v001 schema -func (m *IntotoV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV001Schema) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("content", "body", m.Content); err != nil { - return err - } - - if m.Content != nil { - if err := m.Content.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content") - } - return err - } - } - - return nil -} - -func (m *IntotoV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v001 schema based on the context it is used -func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if m.Content != nil { - - if err := m.Content.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV001Schema) UnmarshalBinary(b []byte) error { - var res IntotoV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV001SchemaContent intoto v001 schema content -// -// swagger:model IntotoV001SchemaContent -type IntotoV001SchemaContent struct { - - // envelope - Envelope string `json:"envelope,omitempty"` - - // hash - Hash *IntotoV001SchemaContentHash `json:"hash,omitempty"` - - // payload hash - PayloadHash *IntotoV001SchemaContentPayloadHash `json:"payloadHash,omitempty"` -} - -// Validate validates this intoto v001 schema content -func (m *IntotoV001SchemaContent) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV001SchemaContent) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *IntotoV001SchemaContent) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this intoto v001 schema content based on the context it is used -func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." 
+ "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV001SchemaContent) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV001SchemaContent) UnmarshalBinary(b []byte) error { - var res IntotoV001SchemaContent - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope; this is computed by the rekor server, client-provided values are ignored -// -// swagger:model IntotoV001SchemaContentHash -type IntotoV001SchemaContentHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the archive - // Required: true - Value *string `json:"value"` -} - -// Validate validates this intoto v001 schema content hash -func (m *IntotoV001SchemaContentHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var intotoV001SchemaContentHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV001SchemaContentHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV001SchemaContentHashAlgorithmSha256 captures enum value "sha256" - IntotoV001SchemaContentHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV001SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV001SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV001SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV001SchemaContentHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v001 schema content hash based on the context it is used -func (m *IntotoV001SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV001SchemaContentHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error { - var res IntotoV001SchemaContentHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope; this is computed by the rekor server, client-provided values are ignored -// -// swagger:model IntotoV001SchemaContentPayloadHash -type IntotoV001SchemaContentPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the envelope's payload - // Required: true - Value *string `json:"value"` -} - -// Validate validates this intoto v001 schema content payload hash -func (m *IntotoV001SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV001SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" - IntotoV001SchemaContentPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV001SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v001 schema content payload hash based on the context it is used -func (m *IntotoV001SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV001SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV001SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { - var res IntotoV001SchemaContentPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go deleted file mode 100644 index c2c08ea5..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go +++ /dev/null @@ -1,757 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// IntotoV002Schema intoto v0.0.2 Schema -// -// # Schema for intoto object -// -// swagger:model intotoV002Schema -type IntotoV002Schema struct { - - // content - // Required: true - Content *IntotoV002SchemaContent `json:"content"` -} - -// Validate validates this intoto v002 schema -func (m *IntotoV002Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV002Schema) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("content", "body", m.Content); err != nil { - return err - } - - if m.Content != nil { - if err := m.Content.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content") - } - return err - } - } - - return nil -} - -// ContextValidate validate this intoto v002 schema based on the context it is used -func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if m.Content != nil { - - if err := m.Content.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002Schema) UnmarshalBinary(b []byte) error { - var res IntotoV002Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContent intoto v002 schema content -// -// swagger:model IntotoV002SchemaContent -type IntotoV002SchemaContent struct { - - // envelope - // Required: true - Envelope *IntotoV002SchemaContentEnvelope `json:"envelope"` - - // hash - Hash *IntotoV002SchemaContentHash `json:"hash,omitempty"` - - // payload hash - PayloadHash *IntotoV002SchemaContentPayloadHash `json:"payloadHash,omitempty"` -} - -// Validate validates this intoto v002 schema content -func (m *IntotoV002SchemaContent) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelope(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *IntotoV002SchemaContent) validateEnvelope(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"envelope", "body", m.Envelope); err != nil { - return err - } - - if m.Envelope != nil { - if err := m.Envelope.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "envelope") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "envelope") - } - return err - } - } - - return nil -} - -func (m *IntotoV002SchemaContent) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *IntotoV002SchemaContent) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this intoto v002 schema content based on the context it is used -func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEnvelope(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error { - - if m.Envelope != nil { - - if err := m.Envelope.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "envelope") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "envelope") - } - return err - } - } - - return nil -} - -func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." 
+ "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContent) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContent) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContent - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContentEnvelope dsse envelope -// -// swagger:model IntotoV002SchemaContentEnvelope -type IntotoV002SchemaContentEnvelope struct { - - // payload of the envelope - // Format: byte - Payload strfmt.Base64 `json:"payload,omitempty"` - - // type describing the payload - // Required: true - PayloadType *string `json:"payloadType"` - - // collection of all signatures of the envelope's payload - // Required: true - // Min Items: 1 - Signatures []*IntotoV002SchemaContentEnvelopeSignaturesItems0 `json:"signatures"` -} - -// Validate validates this intoto v002 schema content envelope -func (m *IntotoV002SchemaContentEnvelope) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePayloadType(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignatures(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV002SchemaContentEnvelope) validatePayloadType(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"envelope"+"."+"payloadType", "body", m.PayloadType); err != nil { - return err - } - - return nil -} - -func (m *IntotoV002SchemaContentEnvelope) validateSignatures(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"envelope"+"."+"signatures", "body", m.Signatures); err != nil { - return err - } - - iSignaturesSize := int64(len(m.Signatures)) - - if err := validate.MinItems("content"+"."+"envelope"+"."+"signatures", "body", iSignaturesSize, 1); err != nil { - return err - } - - for i := 0; i < len(m.Signatures); i++ { - if swag.IsZero(m.Signatures[i]) { // not required - continue - } - - if m.Signatures[i] != nil { - if err := m.Signatures[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// ContextValidate validate this intoto v002 schema content envelope based on the context it is used -func (m *IntotoV002SchemaContentEnvelope) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateSignatures(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.Signatures); i++ { - - if m.Signatures[i] != nil { - - if swag.IsZero(m.Signatures[i]) { // not required - return nil - } - - if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelope) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelope) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentEnvelope - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContentEnvelopeSignaturesItems0 a signature of the envelope's payload along with the public key for the signature -// -// swagger:model IntotoV002SchemaContentEnvelopeSignaturesItems0 -type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct { - - // optional id of the key used to create the signature - Keyid string `json:"keyid,omitempty"` - - // public key that corresponds to this signature - // Required: true - // Format: byte - PublicKey *strfmt.Base64 `json:"publicKey"` - - // signature of the payload - // Required: true - // Format: byte - Sig *strfmt.Base64 `json:"sig"` -} - -// Validate validates this intoto v002 schema content envelope signatures items0 -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSig(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - return nil -} - -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error { - - if err := validate.Required("sig", "body", m.Sig); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentEnvelopeSignaturesItems0 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope -// -// swagger:model IntotoV002SchemaContentHash -type IntotoV002SchemaContentHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the archive - // Required: true - Value *string `json:"value"` -} - -// Validate validates this intoto v002 schema content hash -func (m *IntotoV002SchemaContentHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var intotoV002SchemaContentHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV002SchemaContentHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV002SchemaContentHashAlgorithmSha256 captures enum value "sha256" - IntotoV002SchemaContentHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV002SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV002SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV002SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV002SchemaContentHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v002 schema content hash based on the context it is used -func (m *IntotoV002SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentHash) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope -// -// swagger:model IntotoV002SchemaContentPayloadHash -type IntotoV002SchemaContentPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value of the payload - // Required: true - Value *string `json:"value"` -} - -// Validate validates this intoto v002 schema content payload hash -func (m *IntotoV002SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV002SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" - IntotoV002SchemaContentPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV002SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v002 schema content payload hash based on the context it is used -func (m *IntotoV002SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go deleted file mode 100644 index 3df3d21b..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Jar Java Archive (JAR) -// -// swagger:model jar -type Jar struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Jar) Kind() string { - return "jar" -} - -// SetKind sets the kind of this subtype -func (m *Jar) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Jar) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Jar - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Jar) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this jar -func (m *Jar) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *Jar) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Jar) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this jar based on the context it is used -func (m *Jar) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Jar) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Jar) UnmarshalBinary(b []byte) error { - var res Jar - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go deleted file mode 100644 index e7b9a590..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// JarSchema JAR Schema -// -// # Schema for JAR objects -// -// swagger:model jarSchema -type JarSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go deleted file mode 100644 index 4564964a..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go +++ /dev/null @@ -1,569 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// JarV001Schema JAR v0.0.1 Schema -// -// # Schema for JAR entries -// -// swagger:model jarV001Schema -type JarV001Schema struct { - - // archive - // Required: true - Archive *JarV001SchemaArchive `json:"archive"` - - // signature - Signature *JarV001SchemaSignature `json:"signature,omitempty"` -} - -// Validate validates this jar v001 schema -func (m *JarV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateArchive(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001Schema) validateArchive(formats strfmt.Registry) error { - - if err := validate.Required("archive", "body", m.Archive); err != nil { - return err - } - - if m.Archive != nil { - if err := m.Archive.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive") - } - return err - } - } - - return nil -} - -func (m *JarV001Schema) validateSignature(formats strfmt.Registry) error { - if swag.IsZero(m.Signature) { // not required - return nil - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema based on the context it is used -func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateArchive(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error { - - if m.Archive != nil { - - if err := m.Archive.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive") - } - return err - } - } - - return nil -} - -func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if swag.IsZero(m.Signature) { // not required - return nil - } - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001Schema) UnmarshalBinary(b []byte) error { - var res JarV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaArchive Information about the archive associated with the entry -// -// swagger:model JarV001SchemaArchive -type JarV001SchemaArchive struct { - - // Specifies the archive inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *JarV001SchemaArchiveHash `json:"hash,omitempty"` -} - -// Validate validates this jar v001 schema archive -func (m *JarV001SchemaArchive) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaArchive) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema archive based on the context it is used -func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaArchive) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaArchive) UnmarshalBinary(b []byte) error { - var res JarV001SchemaArchive - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaArchiveHash Specifies the hash algorithm and value encompassing the entire signed archive -// -// swagger:model JarV001SchemaArchiveHash -type JarV001SchemaArchiveHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the archive - // Required: true - Value *string `json:"value"` -} - -// Validate validates this jar v001 schema archive hash -func (m *JarV001SchemaArchiveHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var jarV001SchemaArchiveHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - jarV001SchemaArchiveHashTypeAlgorithmPropEnum = append(jarV001SchemaArchiveHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // JarV001SchemaArchiveHashAlgorithmSha256 captures enum value "sha256" - JarV001SchemaArchiveHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *JarV001SchemaArchiveHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, jarV001SchemaArchiveHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *JarV001SchemaArchiveHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("archive"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("archive"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaArchiveHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("archive"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this jar v001 schema archive hash based on context it is used -func (m *JarV001SchemaArchiveHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaArchiveHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaArchiveHash) UnmarshalBinary(b []byte) error { - var res JarV001SchemaArchiveHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaSignature Information about the included signature in the JAR file -// -// swagger:model JarV001SchemaSignature -type JarV001SchemaSignature struct { - - // Specifies the PKCS7 signature embedded within the JAR file - // Required: 
true - // Read Only: true - // Format: byte - Content strfmt.Base64 `json:"content"` - - // public key - // Required: true - PublicKey *JarV001SchemaSignaturePublicKey `json:"publicKey"` -} - -// Validate validates this jar v001 schema signature -func (m *JarV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignature) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema signature based on the context it is used -func (m *JarV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." 
+ "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res JarV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaSignaturePublicKey The X509 certificate containing the public key JAR which verifies the signature of the JAR -// -// swagger:model JarV001SchemaSignaturePublicKey -type JarV001SchemaSignaturePublicKey struct { - - // Specifies the content of the X509 certificate containing the public key used to verify the signature - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this jar v001 schema signature public key -func (m *JarV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this jar v001 schema signature public key based on the context it is used -func (m *JarV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res JarV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go deleted file mode 100644 index ee32ded4..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go +++ /dev/null @@ -1,445 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// LogEntry log entry -// -// swagger:model LogEntry -type LogEntry map[string]LogEntryAnon - -// Validate validates this log entry -func (m LogEntry) Validate(formats strfmt.Registry) error { - var res []error - - for k := range m { - - if swag.IsZero(m[k]) { // not required - continue - } - if val, ok := m[k]; ok { - if err := val.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(k) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName(k) - } - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// ContextValidate validate this log entry based on the context it is used -func (m LogEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - for k := range m { - - if val, ok := m[k]; ok { - if err := val.ContextValidate(ctx, formats); err != nil { - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// LogEntryAnon log entry anon -// -// swagger:model LogEntryAnon -type LogEntryAnon struct { - - // attestation - Attestation *LogEntryAnonAttestation `json:"attestation,omitempty"` - - // body - // Required: true - Body interface{} `json:"body"` - - // The time the entry was added to the log as a Unix timestamp in seconds - // Required: true - IntegratedTime *int64 `json:"integratedTime"` - - // This is the SHA256 hash of the DER-encoded public key for the log at the time the entry was included in the log - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - LogID *string `json:"logID"` - - // log index - // Required: true - // Minimum: 0 - LogIndex *int64 `json:"logIndex"` - - // verification - Verification *LogEntryAnonVerification `json:"verification,omitempty"` -} - -// Validate validates this log entry anon -func (m *LogEntryAnon) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAttestation(formats); err != nil { - res = append(res, err) - } - - if err := m.validateBody(formats); err != nil { - res = append(res, err) - } - - if err := m.validateIntegratedTime(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogID(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogIndex(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerification(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *LogEntryAnon) validateAttestation(formats strfmt.Registry) error { - if swag.IsZero(m.Attestation) { // not required - return nil - } - - if m.Attestation != nil { - if err := m.Attestation.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("attestation") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("attestation") - } - return err - } - } - - return nil -} - -func (m *LogEntryAnon) validateBody(formats strfmt.Registry) error { - - if m.Body == nil { - return errors.Required("body", "body", nil) - } - - return nil -} - -func (m *LogEntryAnon) validateIntegratedTime(formats strfmt.Registry) error { - - if err := validate.Required("integratedTime", "body", m.IntegratedTime); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateLogID(formats strfmt.Registry) error { - - if err := validate.Required("logID", "body", m.LogID); err != nil { - return err - } - - if err := validate.Pattern("logID", "body", *m.LogID, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateLogIndex(formats strfmt.Registry) error { - - if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { - return err - } - - if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateVerification(formats strfmt.Registry) error { - if swag.IsZero(m.Verification) { // not required - return nil - } - - if m.Verification != nil { - if err := m.Verification.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification") - } - return err - } - } - - return nil -} - -// ContextValidate validate this log entry anon based on the context it is used -func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateAttestation(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateVerification(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error { - - if m.Attestation != nil { - - if swag.IsZero(m.Attestation) { // not required - return nil - } - - if err := m.Attestation.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("attestation") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("attestation") - } - return err - } - } - - return nil -} - -func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error { - - if m.Verification != nil { - - if swag.IsZero(m.Verification) { // not required - return nil - } - - if err := m.Verification.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnon) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnon) UnmarshalBinary(b []byte) error { - var res LogEntryAnon - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// LogEntryAnonAttestation log entry anon attestation -// -// swagger:model LogEntryAnonAttestation -type LogEntryAnonAttestation struct { - - // data - // Format: byte - Data strfmt.Base64 `json:"data,omitempty"` -} - -// Validate validates this log entry anon attestation -func (m *LogEntryAnonAttestation) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this log entry anon attestation based on context it is used -func (m *LogEntryAnonAttestation) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnonAttestation) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnonAttestation) UnmarshalBinary(b []byte) error { - var res LogEntryAnonAttestation - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// LogEntryAnonVerification log entry anon verification -// -// swagger:model LogEntryAnonVerification -type LogEntryAnonVerification struct { - - // inclusion proof - InclusionProof *InclusionProof `json:"inclusionProof,omitempty"` - - // Signature over the logID, logIndex, body and integratedTime. - // Format: byte - SignedEntryTimestamp strfmt.Base64 `json:"signedEntryTimestamp,omitempty"` -} - -// Validate validates this log entry anon verification -func (m *LogEntryAnonVerification) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateInclusionProof(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogEntryAnonVerification) validateInclusionProof(formats strfmt.Registry) error { - if swag.IsZero(m.InclusionProof) { // not required - return nil - } - - if m.InclusionProof != nil { - if err := m.InclusionProof.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification" + "." 
+ "inclusionProof") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification" + "." + "inclusionProof") - } - return err - } - } - - return nil -} - -// ContextValidate validate this log entry anon verification based on the context it is used -func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateInclusionProof(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error { - - if m.InclusionProof != nil { - - if swag.IsZero(m.InclusionProof) { // not required - return nil - } - - if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification" + "." + "inclusionProof") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification" + "." + "inclusionProof") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnonVerification) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnonVerification) UnmarshalBinary(b []byte) error { - var res LogEntryAnonVerification - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go deleted file mode 100644 index cb57b27f..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go +++ /dev/null @@ -1,221 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// LogInfo log info -// -// swagger:model LogInfo -type LogInfo struct { - - // inactive shards - InactiveShards []*InactiveShardLogInfo `json:"inactiveShards"` - - // The current hash value stored at the root of the merkle tree - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` - - // The current signed tree head - // Required: true - SignedTreeHead *string `json:"signedTreeHead"` - - // The current treeID - // Required: true - // Pattern: ^[0-9]+$ - TreeID *string `json:"treeID"` - - // The current number of nodes in the merkle tree - // Required: true - // Minimum: 1 - TreeSize *int64 `json:"treeSize"` -} - -// Validate validates this log info -func (m *LogInfo) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateInactiveShards(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignedTreeHead(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeID(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeSize(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogInfo) validateInactiveShards(formats strfmt.Registry) error { - if swag.IsZero(m.InactiveShards) { // not required - return nil - } - - for i := 0; i < len(m.InactiveShards); i++ { - if swag.IsZero(m.InactiveShards[i]) { // not required - continue - } - - if m.InactiveShards[i] != nil { - if err := m.InactiveShards[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("inactiveShards" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -func (m *LogInfo) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateSignedTreeHead(formats strfmt.Registry) error { - - if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateTreeID(formats strfmt.Registry) error { - - if err := validate.Required("treeID", "body", m.TreeID); err != nil { - return err - } - - if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error { - - if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { - return err - } - - if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this log info based on the context it is used -func (m *LogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateInactiveShards(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.InactiveShards); i++ { - - if m.InactiveShards[i] != nil { - - if swag.IsZero(m.InactiveShards[i]) { // not required - return nil - } - - if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogInfo) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogInfo) UnmarshalBinary(b []byte) error { - var res LogInfo - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go deleted file mode 100644 index 5b734a5f..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go +++ /dev/null @@ -1,195 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/validate" -) - -// ProposedEntry proposed entry -// -// swagger:discriminator ProposedEntry kind -type ProposedEntry interface { - runtime.Validatable - runtime.ContextValidatable - - // kind - // Required: true - Kind() string - SetKind(string) - - // AdditionalProperties in base type should be handled just like regular properties - // At this moment, the base type property is pushed down to the subtype -} - -type proposedEntry struct { - kindField string -} - -// Kind gets the kind of this polymorphic type -func (m *proposedEntry) Kind() string { - return "ProposedEntry" -} - -// SetKind sets the kind of this polymorphic type -func (m *proposedEntry) SetKind(val string) { -} - -// UnmarshalProposedEntrySlice unmarshals polymorphic slices of ProposedEntry -func UnmarshalProposedEntrySlice(reader io.Reader, consumer runtime.Consumer) ([]ProposedEntry, error) { - var elements []json.RawMessage - if err := consumer.Consume(reader, &elements); err != nil { - return nil, err - } - - var result []ProposedEntry - for _, element := range elements { - obj, err := unmarshalProposedEntry(element, consumer) - if err != nil { - return nil, err - } - result = append(result, obj) - } - return result, nil -} - -// UnmarshalProposedEntry unmarshals polymorphic ProposedEntry -func UnmarshalProposedEntry(reader io.Reader, consumer runtime.Consumer) (ProposedEntry, error) { - // we need to read this twice, so first into a buffer - data, err := io.ReadAll(reader) - if err != nil { - return nil, err - } - return unmarshalProposedEntry(data, consumer) -} - -func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEntry, error) { - buf := bytes.NewBuffer(data) - buf2 := bytes.NewBuffer(data) - - // the first time this is read is to fetch the value of the kind property.
- var getType struct { - Kind string `json:"kind"` - } - if err := consumer.Consume(buf, &getType); err != nil { - return nil, err - } - - if err := validate.RequiredString("kind", "body", getType.Kind); err != nil { - return nil, err - } - - // The value of kind is used to determine which type to create and unmarshal the data into - switch getType.Kind { - case "ProposedEntry": - var result proposedEntry - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "alpine": - var result Alpine - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "cose": - var result Cose - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "dsse": - var result DSSE - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "hashedrekord": - var result Hashedrekord - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "helm": - var result Helm - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "intoto": - var result Intoto - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "jar": - var result Jar - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rekord": - var result Rekord - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rfc3161": - var result Rfc3161 - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rpm": - var result Rpm - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "tuf": - var result TUF - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - } - return nil, errors.New(422, "invalid kind value: %q", getType.Kind) -} - -// Validate validates this proposed entry -func (m *proposedEntry) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this proposed entry based on context it is used -func (m *proposedEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go deleted file mode 100644 index 81c8ff05..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rekord Rekord object -// -// swagger:model rekord -type Rekord struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rekord) Kind() string { - return "rekord" -} - -// SetKind sets the kind of this subtype -func (m *Rekord) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rekord) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmarshalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rekord - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rekord) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rekord -func (m *Rekord) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *Rekord) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rekord) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rekord based on the context it is used -func (m *Rekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rekord) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rekord) UnmarshalBinary(b []byte) error { - var res Rekord - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go deleted file mode 100644 index e85442ae..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// RekordSchema Rekor Schema -// -// # Schema for Rekord objects -// -// swagger:model rekordSchema -type RekordSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go deleted file mode 100644 index 9a525717..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// RekordV001Schema Rekor v0.0.1 Schema -// -// # Schema for Rekord object -// -// swagger:model rekordV001Schema -type RekordV001Schema struct { - - // data - // Required: true - Data *RekordV001SchemaData `json:"data"` - - // signature - // Required: true - Signature *RekordV001SchemaSignature `json:"signature"` -} - -// Validate validates this rekord v001 schema -func (m *RekordV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *RekordV001Schema) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema based on the context it is used -func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001Schema) UnmarshalBinary(b []byte) error { - var res RekordV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaData Information about the content associated with the entry -// -// swagger:model RekordV001SchemaData -type RekordV001SchemaData struct { - - // Specifies the content inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *RekordV001SchemaDataHash `json:"hash,omitempty"` -} - -// Validate validates this rekord v001 schema data -func (m *RekordV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaData) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema data based on the context it is used -func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaData) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaDataHash Specifies the hash algorithm and value for the content -// -// swagger:model RekordV001SchemaDataHash -type RekordV001SchemaDataHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the content - // Required: true - Value *string `json:"value"` -} - -// Validate validates this rekord v001 schema data hash -func (m *RekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var rekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rekordV001SchemaDataHashTypeAlgorithmPropEnum = append(rekordV001SchemaDataHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // RekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" - RekordV001SchemaDataHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *RekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *RekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this rekord v001 schema data hash based on the context it is used -func (m *RekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaDataHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaSignature Information about the detached signature associated with the entry -// -// swagger:model RekordV001SchemaSignature -type RekordV001SchemaSignature struct { - - // Specifies the content of the signature inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` - - // Specifies the format of the signature - // Required: true - // Enum: [pgp minisign x509 ssh] - Format *string `json:"format"` - - // public key - // Required: true - PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey"` -} - -// Validate validates this rekord v001 schema signature -func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validateFormat(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignature) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -var rekordV001SchemaSignatureTypeFormatPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["pgp","minisign","x509","ssh"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rekordV001SchemaSignatureTypeFormatPropEnum = append(rekordV001SchemaSignatureTypeFormatPropEnum, v) - } -} - -const ( - - // RekordV001SchemaSignatureFormatPgp captures enum value "pgp" - RekordV001SchemaSignatureFormatPgp string = "pgp" - - // RekordV001SchemaSignatureFormatMinisign captures enum value "minisign" - RekordV001SchemaSignatureFormatMinisign string = "minisign" - - // RekordV001SchemaSignatureFormatX509 captures enum value "x509" - RekordV001SchemaSignatureFormatX509 string = "x509" - - // RekordV001SchemaSignatureFormatSSH captures enum value "ssh" - RekordV001SchemaSignatureFormatSSH string = "ssh" -) - -// prop value enum -func (m *RekordV001SchemaSignature) validateFormatEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rekordV001SchemaSignatureTypeFormatPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"format", "body", m.Format); err != nil { - return err - } - - // value enum - if err := m.validateFormatEnum("signature"+"."+"format", "body", *m.Format); err != nil { - return err - } - - return nil -} - -func (m *RekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - 
return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema signature based on the context it is used -func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaSignaturePublicKey The public key that can verify the signature -// -// swagger:model RekordV001SchemaSignaturePublicKey -type RekordV001SchemaSignaturePublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rekord v001 schema signature public key -func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rekord v001 schema signature public key based on context it is used -func (m *RekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go deleted file mode 100644 index ef8d42e7..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. 
- -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rfc3161 RFC3161 Timestamp -// -// swagger:model rfc3161 -type Rfc3161 struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rfc3161) Kind() string { - return "rfc3161" -} - -// SetKind sets the kind of this subtype -func (m *Rfc3161) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rfc3161) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rfc3161 - - if base.Kind != result.Kind() { - /* Not the type we're looking for. 
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rfc3161) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rfc3161 -func (m *Rfc3161) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rfc3161) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rfc3161 based on the context it is used -func (m *Rfc3161) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161) UnmarshalBinary(b []byte) error { - var res Rfc3161 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go deleted file mode 100644 index 826013a2..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Rfc3161Schema Timestamp Schema -// -// # Schema for RFC 3161 timestamp objects -// -// swagger:model rfc3161Schema -type Rfc3161Schema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go deleted file mode 100644 index c3a50c84..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go +++ /dev/null @@ -1,183 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rfc3161V001Schema Timestamp v0.0.1 Schema -// -// # Schema for RFC3161 entries -// -// swagger:model rfc3161V001Schema -type Rfc3161V001Schema struct { - - // tsr - // Required: true - Tsr *Rfc3161V001SchemaTsr `json:"tsr"` -} - -// Validate validates this rfc3161 v001 schema -func (m *Rfc3161V001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateTsr(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161V001Schema) validateTsr(formats strfmt.Registry) error { - - if err := validate.Required("tsr", "body", m.Tsr); err != nil { - return err - } - - if m.Tsr != nil { - if err := m.Tsr.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("tsr") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("tsr") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rfc3161 v001 schema based on the context it is used -func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateTsr(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error { - - if m.Tsr != nil { - - if err := m.Tsr.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("tsr") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("tsr") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161V001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161V001Schema) UnmarshalBinary(b []byte) error { - var res Rfc3161V001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// Rfc3161V001SchemaTsr Information about the tsr file associated with the entry -// -// swagger:model Rfc3161V001SchemaTsr -type Rfc3161V001SchemaTsr struct { - - // Specifies the tsr file content inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rfc3161 v001 schema tsr -func (m *Rfc3161V001SchemaTsr) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161V001SchemaTsr) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("tsr"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rfc3161 v001 schema tsr based on context it is used -func (m *Rfc3161V001SchemaTsr) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161V001SchemaTsr) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161V001SchemaTsr) UnmarshalBinary(b []byte) error { - var res Rfc3161V001SchemaTsr - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go deleted file mode 100644 index 8b1f10c7..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rpm RPM package -// -// swagger:model rpm -type Rpm struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rpm) Kind() string { - return "rpm" -} - -// SetKind sets the kind of this subtype -func (m *Rpm) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rpm) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rpm - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rpm) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rpm -func (m *Rpm) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Rpm) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rpm) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rpm based on the context it is used -func (m *Rpm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rpm) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rpm) UnmarshalBinary(b []byte) error { - var res Rpm - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go deleted file mode 100644 index 5cb37836..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// RpmSchema RPM Schema -// -// # Schema for RPM objects -// -// swagger:model rpmSchema -type RpmSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go deleted file mode 100644 index 80dadde7..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go +++ /dev/null @@ -1,450 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// RpmV001Schema RPM v0.0.1 Schema -// -// # Schema for RPM entries -// -// swagger:model rpmV001Schema -type RpmV001Schema struct { - - // package - // Required: true - Package *RpmV001SchemaPackage `json:"package"` - - // public key - // Required: true - PublicKey *RpmV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this rpm v001 schema -func (m *RpmV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePackage(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001Schema) validatePackage(formats strfmt.Registry) error { - - if err := validate.Required("package", "body", m.Package); err != nil { - return err - } - - if m.Package != nil { - if err := m.Package.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *RpmV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rpm v001 schema based on the context it is used -func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePackage(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { - - if m.Package != nil { - - if err := m.Package.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001Schema) UnmarshalBinary(b []byte) error { - var res RpmV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPackage Information about the package associated with the entry -// -// swagger:model RpmV001SchemaPackage -type RpmV001SchemaPackage struct { - - // Specifies the package inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *RpmV001SchemaPackageHash `json:"hash,omitempty"` - - // Values of the RPM headers - // Read Only: true - Headers map[string]string `json:"headers,omitempty"` -} - -// Validate validates this rpm v001 schema package -func (m *RpmV001SchemaPackage) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPackage) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rpm v001 schema package based on the context it is used -func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateHeaders(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -func (m *RpmV001SchemaPackage) contextValidateHeaders(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPackage) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPackage) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPackage - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPackageHash Specifies the hash algorithm and value for the package -// -// swagger:model RpmV001SchemaPackageHash -type RpmV001SchemaPackageHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: [sha256] - Algorithm *string `json:"algorithm"` - - // The hash value for the package - // Required: true - Value *string `json:"value"` -} - -// Validate validates this rpm v001 schema package hash -func (m *RpmV001SchemaPackageHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var rpmV001SchemaPackageHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rpmV001SchemaPackageHashTypeAlgorithmPropEnum = append(rpmV001SchemaPackageHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // RpmV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" - RpmV001SchemaPackageHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *RpmV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rpmV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RpmV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *RpmV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rpm v001 schema package hash based on context it is used -func (m *RpmV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPackageHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPackageHash) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPackageHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPublicKey The PGP public key that can verify the RPM signature -// -// swagger:model RpmV001SchemaPublicKey -type 
RpmV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rpm v001 schema public key -func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rpm v001 schema public key based on context it is used -func (m *RpmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go deleted file mode 100644 index bb1ccccc..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go +++ /dev/null @@ -1,341 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// SearchIndex search index -// -// swagger:model SearchIndex -type SearchIndex struct { - - // email - // Format: email - Email strfmt.Email `json:"email,omitempty"` - - // hash - // Pattern: ^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$ - Hash string `json:"hash,omitempty"` - - // operator - // Enum: [and or] - Operator string `json:"operator,omitempty"` - - // public key - PublicKey *SearchIndexPublicKey `json:"publicKey,omitempty"` -} - -// Validate validates this search index -func (m *SearchIndex) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEmail(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateOperator(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchIndex) validateEmail(formats strfmt.Registry) error { - if swag.IsZero(m.Email) { // not required - return nil - } - - if err := validate.FormatOf("email", "body", "email", m.Email.String(), formats); err != nil { - return err - } - - return nil -} - -func (m *SearchIndex) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := validate.Pattern("hash", "body", m.Hash, `^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil { - return err - } - - return nil -} - -var searchIndexTypeOperatorPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["and","or"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - searchIndexTypeOperatorPropEnum = append(searchIndexTypeOperatorPropEnum, v) - } -} - -const ( - - // SearchIndexOperatorAnd captures enum value "and" - SearchIndexOperatorAnd string = "and" - - // SearchIndexOperatorOr captures enum value "or" - SearchIndexOperatorOr string = "or" -) - -// prop value enum -func (m *SearchIndex) validateOperatorEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, searchIndexTypeOperatorPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *SearchIndex) validateOperator(formats strfmt.Registry) error { - if swag.IsZero(m.Operator) { // not required - return nil - } - - // value enum - if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil { - return err - } - - return nil -} - -func (m *SearchIndex) validatePublicKey(formats strfmt.Registry) error { - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this search index based on the context it is used -func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := 
m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *SearchIndex) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchIndex) UnmarshalBinary(b []byte) error { - var res SearchIndex - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// SearchIndexPublicKey search index public key -// -// swagger:model SearchIndexPublicKey -type SearchIndexPublicKey struct { - - // content - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // format - // Required: true - // Enum: [pgp x509 minisign ssh tuf] - Format *string `json:"format"` - - // url - // Format: uri - URL strfmt.URI `json:"url,omitempty"` -} - -// Validate validates this search index public key -func (m *SearchIndexPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateFormat(formats); err != nil { - res = append(res, err) - } - - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var searchIndexPublicKeyTypeFormatPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["pgp","x509","minisign","ssh","tuf"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - searchIndexPublicKeyTypeFormatPropEnum = append(searchIndexPublicKeyTypeFormatPropEnum, v) - } -} - -const ( - - // SearchIndexPublicKeyFormatPgp captures enum value "pgp" - SearchIndexPublicKeyFormatPgp string = "pgp" - - // SearchIndexPublicKeyFormatX509 captures enum value "x509" - SearchIndexPublicKeyFormatX509 string = "x509" - - // SearchIndexPublicKeyFormatMinisign captures enum value "minisign" - SearchIndexPublicKeyFormatMinisign string = "minisign" - - // SearchIndexPublicKeyFormatSSH captures enum value "ssh" - SearchIndexPublicKeyFormatSSH string = "ssh" - - // SearchIndexPublicKeyFormatTUF captures enum value "tuf" - SearchIndexPublicKeyFormatTUF string = "tuf" -) - -// prop value enum -func (m *SearchIndexPublicKey) validateFormatEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, searchIndexPublicKeyTypeFormatPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *SearchIndexPublicKey) validateFormat(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"format", "body", m.Format); err != nil { - return err - } - - // value enum - if err := m.validateFormatEnum("publicKey"+"."+"format", "body", *m.Format); err != nil { - return err - } - - return nil -} - -func (m *SearchIndexPublicKey) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this search index public key based on context it is used -func (m *SearchIndexPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *SearchIndexPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchIndexPublicKey) UnmarshalBinary(b []byte) error { - var res SearchIndexPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go deleted file mode 100644 index 425ec8b3..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - "io" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// SearchLogQuery search log query -// -// swagger:model SearchLogQuery -type SearchLogQuery struct { - entriesField []ProposedEntry - - // entry u UI ds - // Max Items: 10 - // Min Items: 1 - EntryUUIDs []string `json:"entryUUIDs"` - - // log indexes - // Max Items: 10 - // Min Items: 1 - LogIndexes []*int64 `json:"logIndexes"` -} - -// Entries gets the entries of this base type -func (m *SearchLogQuery) Entries() []ProposedEntry { - return m.entriesField -} - -// SetEntries sets the entries of this base type -func (m *SearchLogQuery) SetEntries(val []ProposedEntry) { - m.entriesField = val -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *SearchLogQuery) UnmarshalJSON(raw []byte) error { - var data struct { - Entries json.RawMessage `json:"entries"` - - EntryUUIDs []string `json:"entryUUIDs"` - - LogIndexes []*int64 `json:"logIndexes"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var propEntries []ProposedEntry - if string(data.Entries) != "null" { - entries, err := UnmarshalProposedEntrySlice(bytes.NewBuffer(data.Entries), runtime.JSONConsumer()) - if err != nil && err != io.EOF { - return err - } - propEntries = entries - } - - var result SearchLogQuery - - // entries - result.entriesField = propEntries - - // entryUUIDs - result.EntryUUIDs = data.EntryUUIDs - - // logIndexes - result.LogIndexes = data.LogIndexes - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m SearchLogQuery) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - EntryUUIDs []string `json:"entryUUIDs"` - - LogIndexes []*int64 `json:"logIndexes"` - }{ - - EntryUUIDs: m.EntryUUIDs, - - LogIndexes: m.LogIndexes, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Entries []ProposedEntry `json:"entries"` - }{ - - Entries: m.entriesField, - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this search log query -func (m *SearchLogQuery) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEntries(formats); err != nil { - res = append(res, err) - } - - if err := m.validateEntryUUIDs(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogIndexes(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchLogQuery) validateEntries(formats strfmt.Registry) error { - if swag.IsZero(m.Entries()) { // not required - return nil - } - - iEntriesSize := int64(len(m.Entries())) - - if err := validate.MinItems("entries", "body", iEntriesSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("entries", "body", iEntriesSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.Entries()); i++ { - - if err := m.entriesField[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("entries" + "." 
+ strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("entries" + "." + strconv.Itoa(i)) - } - return err - } - - } - - return nil -} - -func (m *SearchLogQuery) validateEntryUUIDs(formats strfmt.Registry) error { - if swag.IsZero(m.EntryUUIDs) { // not required - return nil - } - - iEntryUUIDsSize := int64(len(m.EntryUUIDs)) - - if err := validate.MinItems("entryUUIDs", "body", iEntryUUIDsSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("entryUUIDs", "body", iEntryUUIDsSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.EntryUUIDs); i++ { - - if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$`); err != nil { - return err - } - - } - - return nil -} - -func (m *SearchLogQuery) validateLogIndexes(formats strfmt.Registry) error { - if swag.IsZero(m.LogIndexes) { // not required - return nil - } - - iLogIndexesSize := int64(len(m.LogIndexes)) - - if err := validate.MinItems("logIndexes", "body", iLogIndexesSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("logIndexes", "body", iLogIndexesSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.LogIndexes); i++ { - if swag.IsZero(m.LogIndexes[i]) { // not required - continue - } - - if err := validate.MinimumInt("logIndexes"+"."+strconv.Itoa(i), "body", *m.LogIndexes[i], 0, false); err != nil { - return err - } - - } - - return nil -} - -// ContextValidate validate this search log query based on the context it is used -func (m *SearchLogQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEntries(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.Entries()); i++ { - - if swag.IsZero(m.entriesField[i]) { // not required - return nil - } - - if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("entries" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("entries" + "." + strconv.Itoa(i)) - } - return err - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *SearchLogQuery) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchLogQuery) UnmarshalBinary(b []byte) error { - var res SearchLogQuery - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go deleted file mode 100644 index a5f6eff0..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// TUF TUF metadata -// -// swagger:model tuf -type TUF struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *TUF) Kind() string { - return "tuf" -} - -// SetKind sets the kind of this subtype -func (m *TUF) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *TUF) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result TUF - - if base.Kind != result.Kind() { - /* Not the type we're looking for. 
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m TUF) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this tuf -func (m *TUF) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUF) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *TUF) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this tuf based on the context it is used -func (m *TUF) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *TUF) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUF) UnmarshalBinary(b []byte) error { - var res TUF - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go deleted file mode 100644 index 37dca8b6..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// TUFSchema TUF Schema -// -// # Schema for TUF metadata objects -// -// swagger:model tufSchema -type TUFSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go deleted file mode 100644 index 021e0ce7..00000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go +++ /dev/null @@ -1,304 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// TUFV001Schema TUF v0.0.1 Schema -// -// # Schema for TUF metadata entries -// -// swagger:model tufV001Schema -type TUFV001Schema struct { - - // metadata - // Required: true - Metadata *TUFV001SchemaMetadata `json:"metadata"` - - // root - // Required: true - Root *TUFV001SchemaRoot `json:"root"` - - // TUF specification version - // Read Only: true - SpecVersion string `json:"spec_version,omitempty"` -} - -// Validate validates this tuf v001 schema -func (m *TUFV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateMetadata(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRoot(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *TUFV001Schema) validateMetadata(formats strfmt.Registry) error { - - if err := validate.Required("metadata", "body", m.Metadata); err != nil { - return err - } - - if m.Metadata != nil { - if err := m.Metadata.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("metadata") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("metadata") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) validateRoot(formats strfmt.Registry) error { - - if err := validate.Required("root", "body", m.Root); err != nil { - return err - } - - if m.Root != nil { - if err := m.Root.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("root") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("root") - } - return err - } - } - - return nil -} - -// ContextValidate validate this tuf v001 schema based on the context it is used -func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateMetadata(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateRoot(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSpecVersion(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { - - if m.Metadata != nil { - - if err := m.Metadata.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("metadata") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("metadata") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error { - - if m.Root != nil { - - if err := m.Root.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("root") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("root") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) contextValidateSpecVersion(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "spec_version", "body", string(m.SpecVersion)); err != nil { - return err - } - - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001Schema) UnmarshalBinary(b []byte) error { - var res TUFV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// TUFV001SchemaMetadata TUF metadata -// -// swagger:model TUFV001SchemaMetadata -type TUFV001SchemaMetadata struct { - - // Specifies the metadata inline within the document - // Required: true - Content interface{} `json:"content"` -} - -// Validate validates this TUF v001 schema metadata -func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error { - - if m.Content == nil { - return errors.Required("metadata"+"."+"content", "body", nil) - } - - return nil -} - -// ContextValidate validates this TUF v001 schema metadata based on context it is used -func (m *TUFV001SchemaMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001SchemaMetadata) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001SchemaMetadata) UnmarshalBinary(b []byte) error { - var res TUFV001SchemaMetadata - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// TUFV001SchemaRoot root metadata containing about the public keys used to sign the manifest -// -// swagger:model TUFV001SchemaRoot -type TUFV001SchemaRoot struct { - - // Specifies the metadata inline within the document - // Required: true - Content interface{} `json:"content"` -} - -// Validate validates this TUF v001 schema root -func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUFV001SchemaRoot) validateContent(formats strfmt.Registry) error { - - if m.Content == nil { - return errors.Required("root"+"."+"content", "body", nil) - } - - return nil -} - -// ContextValidate validates this TUF v001 schema root based on context it is used -func (m *TUFV001SchemaRoot) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001SchemaRoot) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001SchemaRoot) UnmarshalBinary(b []byte) error { - var res TUFV001SchemaRoot - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go new file mode 100644 index 00000000..02c032b0 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/algorithm_registry.go @@ -0,0 +1,314 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "errors" + "fmt" + + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" +) + +// PublicKeyType represents the public key algorithm for a given signature algorithm. 
+type PublicKeyType uint + +const ( + // RSA public key + RSA PublicKeyType = iota + // ECDSA public key + ECDSA + // ED25519 public key + ED25519 +) + +// RSAKeySize represents the size of an RSA public key in bits. +type RSAKeySize int + +// AlgorithmDetails exposes relevant information for a given signature algorithm. +type AlgorithmDetails struct { + // knownAlgorithm is the signature algorithm that the following details refer to. + knownAlgorithm v1.PublicKeyDetails + + // keyType is the public key algorithm being used. + keyType PublicKeyType + + // hashType is the hash algorithm being used. + hashType crypto.Hash + + // protoHashType is the hash algorithm being used as a proto message, it must be the protobuf-specs + // v1.HashAlgorithm equivalent of the hashType. + protoHashType v1.HashAlgorithm + + // extraKeyParams contains any extra parameters required to check a given public key against this entry. + // + // The underlying type of these parameters is dependent on the keyType. + // For example, ECDSA algorithms will store an elliptic curve here whereas, RSA keys will store the key size. + // Algorithms that don't require any extra parameters leave this set to nil. + extraKeyParams interface{} + + // flagValue is a string representation of the signature algorithm that follows the naming conventions of CLI + // arguments that are used for Sigstore services. + flagValue string +} + +// GetSignatureAlgorithm returns the PublicKeyDetails associated with the algorithm details. +func (a AlgorithmDetails) GetSignatureAlgorithm() v1.PublicKeyDetails { + return a.knownAlgorithm +} + +// GetKeyType returns the PublicKeyType for the algorithm details. +func (a AlgorithmDetails) GetKeyType() PublicKeyType { + return a.keyType +} + +// GetHashType returns the hash algorithm that should be used with this algorithm. +func (a AlgorithmDetails) GetHashType() crypto.Hash { + return a.hashType +} + +// GetProtoHashType is a convenience method to get the protobuf-specs type of the hash algorithm. +func (a AlgorithmDetails) GetProtoHashType() v1.HashAlgorithm { + return a.protoHashType +} + +// GetRSAKeySize returns the RSA key size for the algorithm details, if the key type is RSA. +func (a AlgorithmDetails) GetRSAKeySize() (RSAKeySize, error) { + if a.keyType != RSA { + return 0, fmt.Errorf("unable to retrieve RSA key size for key type: %T", a.keyType) + } + rsaKeySize, ok := a.extraKeyParams.(RSAKeySize) + if !ok { + // This should be unreachable. + return 0, fmt.Errorf("unable to retrieve key size for RSA, malformed algorithm details?: %T", a.keyType) + } + return rsaKeySize, nil +} + +// GetECDSACurve returns the elliptic curve for the algorithm details, if the key type is ECDSA. +func (a AlgorithmDetails) GetECDSACurve() (*elliptic.Curve, error) { + if a.keyType != ECDSA { + return nil, fmt.Errorf("unable to retrieve ECDSA curve for key type: %T", a.keyType) + } + ecdsaCurve, ok := a.extraKeyParams.(elliptic.Curve) + if !ok { + // This should be unreachable. 
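+		// (Per the supportedAlgorithms table below, every ECDSA entry stores an
+		// elliptic.Curve in extraKeyParams, so this type assertion only fails
+		// if the table itself is malformed.)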
+ return nil, fmt.Errorf("unable to retrieve curve for ECDSA, malformed algorithm details?: %T", a.keyType) + } + return &ecdsaCurve, nil +} + +func (a AlgorithmDetails) checkKey(pubKey crypto.PublicKey) (bool, error) { + switch a.keyType { + case RSA: + rsaKey, ok := pubKey.(*rsa.PublicKey) + if !ok { + return false, nil + } + keySize, err := a.GetRSAKeySize() + if err != nil { + return false, err + } + return rsaKey.Size()*8 == int(keySize), nil + case ECDSA: + ecdsaKey, ok := pubKey.(*ecdsa.PublicKey) + if !ok { + return false, nil + } + curve, err := a.GetECDSACurve() + if err != nil { + return false, err + } + return ecdsaKey.Curve == *curve, nil + case ED25519: + _, ok := pubKey.(ed25519.PublicKey) + return ok, nil + } + return false, fmt.Errorf("unrecognized key type: %T", a.keyType) +} + +func (a AlgorithmDetails) checkHash(hashType crypto.Hash) bool { + return a.hashType == hashType +} + +// Note that deprecated options in PublicKeyDetails are not included in this +// list, including PKCS1v1.5 encoded RSA. Refer to the v1.PublicKeyDetails enum +// for more details. +var supportedAlgorithms = []AlgorithmDetails{ + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pkcs1-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pkcs1-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pkcs1-4096-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(2048), "rsa-sign-pss-2048-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(3072), "rsa-sign-pss-3072-sha256"}, + {v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, RSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, RSAKeySize(4096), "rsa-sign-pss-4092-sha256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P256(), "ecdsa-sha2-256-nistp256"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, ECDSA, crypto.SHA384, v1.HashAlgorithm_SHA2_384, elliptic.P384(), "ecdsa-sha2-384-nistp384"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P384(), "ecdsa-sha2-256-nistp384"}, //nolint:staticcheck + {v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512, ECDSA, crypto.SHA512, v1.HashAlgorithm_SHA2_512, elliptic.P521(), "ecdsa-sha2-512-nistp521"}, + {v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_256, ECDSA, crypto.SHA256, v1.HashAlgorithm_SHA2_256, elliptic.P521(), "ecdsa-sha2-256-nistp521"}, //nolint:staticcheck + {v1.PublicKeyDetails_PKIX_ED25519, ED25519, crypto.Hash(0), v1.HashAlgorithm_HASH_ALGORITHM_UNSPECIFIED, nil, "ed25519"}, + {v1.PublicKeyDetails_PKIX_ED25519_PH, ED25519, crypto.SHA512, v1.HashAlgorithm_SHA2_512, nil, "ed25519-ph"}, +} + +// AlgorithmRegistryConfig represents a set of permitted algorithms for a given Sigstore service or component. +// +// Individual services may wish to restrict what algorithms are allowed to a subset of what is covered in the algorithm +// registry (represented by v1.PublicKeyDetails). 
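+//
+// A minimal usage sketch (hypothetical caller code; pub is an assumed
+// crypto.PublicKey, not a name defined in this package):
+//
+//	cfg, err := NewAlgorithmRegistryConfig([]v1.PublicKeyDetails{
+//		v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
+//	})
+//	if err != nil {
+//		// handle err
+//	}
+//	permitted, err := cfg.IsAlgorithmPermitted(pub, crypto.SHA256)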
+type AlgorithmRegistryConfig struct { + permittedAlgorithms []AlgorithmDetails +} + +// GetAlgorithmDetails retrieves a set of details for a given v1.PublicKeyDetails flag that allows users to +// introspect the public key algorithm, hash algorithm and more. +func GetAlgorithmDetails(knownSignatureAlgorithm v1.PublicKeyDetails) (AlgorithmDetails, error) { + for _, detail := range supportedAlgorithms { + if detail.knownAlgorithm == knownSignatureAlgorithm { + return detail, nil + } + } + return AlgorithmDetails{}, fmt.Errorf("could not find algorithm details for known signature algorithm: %s", knownSignatureAlgorithm) +} + +// NewAlgorithmRegistryConfig creates a new AlgorithmRegistryConfig for a set of permitted signature algorithms. +func NewAlgorithmRegistryConfig(algorithmConfig []v1.PublicKeyDetails) (*AlgorithmRegistryConfig, error) { + permittedAlgorithms := make([]AlgorithmDetails, 0, len(supportedAlgorithms)) + for _, algorithm := range algorithmConfig { + a, err := GetAlgorithmDetails(algorithm) + if err != nil { + return nil, err + } + permittedAlgorithms = append(permittedAlgorithms, a) + } + return &AlgorithmRegistryConfig{permittedAlgorithms: permittedAlgorithms}, nil +} + +// IsAlgorithmPermitted checks whether a given public key/hash algorithm combination is permitted by a registry config. +func (registryConfig AlgorithmRegistryConfig) IsAlgorithmPermitted(key crypto.PublicKey, hash crypto.Hash) (bool, error) { + for _, algorithm := range registryConfig.permittedAlgorithms { + keyMatch, err := algorithm.checkKey(key) + if err != nil { + return false, err + } + if keyMatch && algorithm.checkHash(hash) { + return true, nil + } + } + return false, nil +} + +// FormatSignatureAlgorithmFlag formats a v1.PublicKeyDetails to a string that conforms to the naming conventions +// of CLI arguments that are used for Sigstore services. +func FormatSignatureAlgorithmFlag(algorithm v1.PublicKeyDetails) (string, error) { + for _, a := range supportedAlgorithms { + if a.knownAlgorithm == algorithm { + return a.flagValue, nil + } + } + return "", fmt.Errorf("could not find matching flag for signature algorithm: %s", algorithm) +} + +// ParseSignatureAlgorithmFlag parses a string produced by FormatSignatureAlgorithmFlag and returns the corresponding +// v1.PublicKeyDetails value. +func ParseSignatureAlgorithmFlag(flag string) (v1.PublicKeyDetails, error) { + for _, a := range supportedAlgorithms { + if a.flagValue == flag { + return a.knownAlgorithm, nil + } + } + return v1.PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED, fmt.Errorf("could not find matching signature algorithm for flag: %s", flag) +} + +// GetDefaultPublicKeyDetails returns the default public key details for a given key. +// +// RSA 2048 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 +// RSA 3072 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256 +// RSA 4096 => v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256 +// ECDSA P256 => v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 +// ECDSA P384 => v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 +// ECDSA P521 => v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512 +// ED25519 => v1.PublicKeyDetails_PKIX_ED25519_PH +// +// This function accepts LoadOptions, which are used to determine the default +// public key details when there may be ambiguities. For example, RSA keys may +// be PSS or PKCS1v1.5 encoded, and ED25519 keys may be used with PureEd25519 or +// with Ed25519ph. The Hash option is ignored if passed, because each of the +// supported algorithms already has a default hash. 
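+//
+// Illustrative sketch (pk is an assumed *ecdsa.PublicKey on P-256):
+//
+//	details, err := GetDefaultPublicKeyDetails(pk)
+//	// on success, details == v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256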
+func GetDefaultPublicKeyDetails(publicKey crypto.PublicKey, opts ...LoadOption) (v1.PublicKeyDetails, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyRSAPSS(&rsaPSSOptions) + } + + switch pk := publicKey.(type) { + case *rsa.PublicKey: + if rsaPSSOptions != nil { + switch pk.Size() * 8 { + case 2048: + return v1.PublicKeyDetails_PKIX_RSA_PSS_2048_SHA256, nil + case 3072: + return v1.PublicKeyDetails_PKIX_RSA_PSS_3072_SHA256, nil + case 4096: + return v1.PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256, nil + } + } else { + switch pk.Size() * 8 { + case 2048: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256, nil + case 3072: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256, nil + case 4096: + return v1.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256, nil + } + } + case *ecdsa.PublicKey: + switch pk.Curve { + case elliptic.P256(): + return v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256, nil + case elliptic.P384(): + return v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384, nil + case elliptic.P521(): + return v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512, nil + } + case ed25519.PublicKey: + if useED25519ph { + return v1.PublicKeyDetails_PKIX_ED25519_PH, nil + } + return v1.PublicKeyDetails_PKIX_ED25519, nil + } + return v1.PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED, errors.New("unsupported public key type") +} + +// GetDefaultAlgorithmDetails returns the default algorithm details for a given +// key, according to GetDefaultPublicKeyDetails. +// +// This function accepts LoadOptions, which are used to determine the default +// algorithm details when there may be ambiguities. For example, RSA keys may be +// PSS or PKCS1v1.5 encoded, and ED25519 keys may be used with PureEd25519 or +// with Ed25519ph. The Hash option is ignored if passed, because each of the +// supported algorithms already has a default hash. +func GetDefaultAlgorithmDetails(publicKey crypto.PublicKey, opts ...LoadOption) (AlgorithmDetails, error) { + knownAlgorithm, err := GetDefaultPublicKeyDetails(publicKey, opts...) + if err != nil { + return AlgorithmDetails{}, err + } + return GetAlgorithmDetails(knownAlgorithm) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go index 9cee68d1..d333cb23 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go @@ -188,7 +188,7 @@ func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...Ver } // Without this check, VerifyASN1 panics on an invalid key. - if !e.publicKey.Curve.IsOnCurve(e.publicKey.X, e.publicKey.Y) { + if !e.publicKey.IsOnCurve(e.publicKey.X, e.publicKey.Y) { return fmt.Errorf("invalid ECDSA public key for %s", e.publicKey.Params().Name) } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go index e26def9f..1122989f 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go @@ -123,3 +123,25 @@ func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts .. } return LoadSignerWithOpts(priv, opts...) } + +// LoadDefaultSigner returns a signature.Signer based on the private key. +// Each private key has a corresponding PublicKeyDetails associated in the +// Sigstore ecosystem, see Algorithm Registry for more details. 
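+//
+// A minimal sketch (priv is an assumed *ecdsa.PrivateKey; error handling elided):
+//
+//	signer, err := LoadDefaultSigner(priv)
+//	// for a P-256 key this signs with SHA-256, per the algorithm registry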
+func LoadDefaultSigner(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key does not implement signature.Signer") + } + algorithmDetails, err := GetDefaultAlgorithmDetails(signer.Public(), opts...) + if err != nil { + return nil, err + } + return LoadSignerFromAlgorithmDetails(privateKey, algorithmDetails, opts...) +} + +// LoadSignerFromAlgorithmDetails returns a signature.Signer based on +// the algorithm details and the user's choice of options. +func LoadSignerFromAlgorithmDetails(privateKey crypto.PrivateKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (Signer, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadSignerWithOpts(privateKey, filteredOpts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go index 70253b12..9ff93420 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go @@ -103,3 +103,25 @@ func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, } return LoadSignerVerifierWithOpts(priv, opts...) } + +// LoadDefaultSignerVerifier returns a signature.SignerVerifier based on +// the private key. Each private key has a corresponding PublicKeyDetails +// associated in the Sigstore ecosystem, see Algorithm Registry for more details. +func LoadDefaultSignerVerifier(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key does not implement signature.Signer") + } + algorithmDetails, err := GetDefaultAlgorithmDetails(signer.Public(), opts...) + if err != nil { + return nil, err + } + return LoadSignerVerifierFromAlgorithmDetails(privateKey, algorithmDetails, opts...) +} + +// LoadSignerVerifierFromAlgorithmDetails returns a signature.SignerVerifier based on +// the algorithm details and the user's choice of options. +func LoadSignerVerifierFromAlgorithmDetails(privateKey crypto.PrivateKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (SignerVerifier, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadSignerVerifierWithOpts(privateKey, filteredOpts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go index 8852ecc4..3f8beff4 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/util.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/util.go @@ -17,11 +17,13 @@ package signature import ( "bytes" + "crypto/rsa" "encoding/json" "fmt" "github.com/google/go-containerregistry/pkg/name" + "github.com/sigstore/sigstore/pkg/signature/options" sigpayload "github.com/sigstore/sigstore/pkg/signature/payload" ) @@ -53,3 +55,20 @@ func VerifyImageSignature(signer SignerVerifier, payload, signature []byte) (ima } return imgPayload.Image, imgPayload.Annotations, nil } + +// GetOptsFromAlgorithmDetails returns a list of LoadOptions that are +// appropriate for the given algorithm details. It ignores the hash type because +// that can be retrieved from the algorithm details. 
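+//
+// Sketch: for RSA-PSS algorithm details, the PSS option is kept alongside the
+// registry's hash (rsaPSSOpts is an assumed *rsa.PSSOptions):
+//
+//	loadOpts := GetOptsFromAlgorithmDetails(details, options.WithRSAPSS(rsaPSSOpts))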
+func GetOptsFromAlgorithmDetails(algorithmDetails AlgorithmDetails, opts ...LoadOption) []LoadOption { + res := []LoadOption{options.WithHash(algorithmDetails.hashType)} + for _, opt := range opts { + var useED25519ph bool + var rsaPSSOptions *rsa.PSSOptions + opt.ApplyED25519ph(&useED25519ph) + opt.ApplyRSAPSS(&rsaPSSOptions) + if useED25519ph || rsaPSSOptions != nil { + res = append(res, opt) + } + } + return res +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go index cdde9fc5..0b5a1bba 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go @@ -136,3 +136,21 @@ func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, return LoadVerifierWithOpts(pubKey, opts...) } + +// LoadDefaultVerifier returns a signature.Verifier based on the public key. +// Each public key has a corresponding PublicKeyDetails associated in the +// Sigstore ecosystem, see Algorithm Registry for more details. +func LoadDefaultVerifier(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) { + algorithmDetails, err := GetDefaultAlgorithmDetails(publicKey, opts...) + if err != nil { + return nil, err + } + return LoadVerifierFromAlgorithmDetails(publicKey, algorithmDetails, opts...) +} + +// LoadVerifierFromAlgorithmDetails returns a signature.Verifier based on +// the algorithm details and the user's choice of options. +func LoadVerifierFromAlgorithmDetails(publicKey crypto.PublicKey, algorithmDetails AlgorithmDetails, opts ...LoadOption) (Verifier, error) { + filteredOpts := GetOptsFromAlgorithmDetails(algorithmDetails, opts...) + return LoadVerifierWithOpts(publicKey, filteredOpts...) +} diff --git a/vendor/go.mozilla.org/pkcs7/.gitignore b/vendor/github.com/smallstep/pkcs7/.gitignore similarity index 88% rename from vendor/go.mozilla.org/pkcs7/.gitignore rename to vendor/github.com/smallstep/pkcs7/.gitignore index daf913b1..948aae2a 100644 --- a/vendor/go.mozilla.org/pkcs7/.gitignore +++ b/vendor/github.com/smallstep/pkcs7/.gitignore @@ -22,3 +22,7 @@ _testmain.go *.exe *.test *.prof + +# Development +.envrc +coverage.out \ No newline at end of file diff --git a/vendor/go.mozilla.org/pkcs7/LICENSE b/vendor/github.com/smallstep/pkcs7/LICENSE similarity index 100% rename from vendor/go.mozilla.org/pkcs7/LICENSE rename to vendor/github.com/smallstep/pkcs7/LICENSE diff --git a/vendor/go.mozilla.org/pkcs7/Makefile b/vendor/github.com/smallstep/pkcs7/Makefile similarity index 100% rename from vendor/go.mozilla.org/pkcs7/Makefile rename to vendor/github.com/smallstep/pkcs7/Makefile diff --git a/vendor/github.com/smallstep/pkcs7/README.md b/vendor/github.com/smallstep/pkcs7/README.md new file mode 100644 index 00000000..9d94e65f --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/README.md @@ -0,0 +1,63 @@ +# pkcs7 + +[![Go Reference](https://pkg.go.dev/badge/github.com/smallstep/pkcs7.svg)](https://pkg.go.dev/github.com/smallstep/pkcs7) +[![Build Status](https://github.com/smallstep/pkcs7/workflows/CI/badge.svg?query=branch%3Amain+event%3Apush)](https://github.com/smallstep/pkcs7/actions/workflows/ci.yml?query=branch%3Amain+event%3Apush) + +pkcs7 implements parsing and creating signed and enveloped messages. 
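+
+Signing is shown in the example below. Enveloped (encrypted) messages are
+supported as well; a minimal sketch, assuming a `plaintext []byte` and a
+recipient `*x509.Certificate` (the `KeyEncryptionAlgorithm` and
+`KeyEncryptionHash` package variables select RSA-OAEP instead of the PKCS#1
+v1.5 default):
+
+```go
+pkcs7.KeyEncryptionAlgorithm = pkcs7.OIDEncryptionAlgorithmRSAESOAEP
+pkcs7.KeyEncryptionHash = crypto.SHA256
+enveloped, err := pkcs7.Encrypt(plaintext, []*x509.Certificate{recipientCert})
+```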
+ +```go +package main + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "os" + + "github.com/smallstep/pkcs7" +) + +func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) { + toBeSigned, err := NewSignedData(content) + if err != nil { + return fmt.Errorf("Cannot initialize signed data: %w", err) + } + if err = toBeSigned.AddSigner(cert, privkey, SignerInfoConfig{}); err != nil { + return fmt.Errorf("Cannot add signer: %w", err) + } + + // Detach signature, omit if you want an embedded signature + toBeSigned.Detach() + + signed, err = toBeSigned.Finish() + if err != nil { + return fmt.Errorf("Cannot finish signing data: %w", err) + } + + // Verify the signature + pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed}) + p7, err := pkcs7.Parse(signed) + if err != nil { + return fmt.Errorf("Cannot parse our signed data: %w", err) + } + + // since the signature was detached, reattach the content here + p7.Content = content + + if bytes.Compare(content, p7.Content) != 0 { + return fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content) + } + if err = p7.Verify(); err != nil { + return fmt.Errorf("Cannot verify our signed data: %w", err) + } + + return signed, nil +} +``` + + +## Credits + +This is a fork of [mozilla-services/pkcs7](https://github.com/mozilla-services/pkcs7) which, itself, was a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7). diff --git a/vendor/go.mozilla.org/pkcs7/ber.go b/vendor/github.com/smallstep/pkcs7/ber.go similarity index 91% rename from vendor/go.mozilla.org/pkcs7/ber.go rename to vendor/github.com/smallstep/pkcs7/ber.go index 73da024a..52333215 100644 --- a/vendor/go.mozilla.org/pkcs7/ber.go +++ b/vendor/github.com/smallstep/pkcs7/ber.go @@ -5,8 +5,6 @@ import ( "errors" ) -var encodeIndent = 0 - type asn1Object interface { EncodeTo(writer *bytes.Buffer) error } @@ -17,8 +15,6 @@ type asn1Structured struct { } func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { - //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) - encodeIndent++ inner := new(bytes.Buffer) for _, obj := range s.content { err := obj.EncodeTo(inner) @@ -26,7 +22,6 @@ func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { return err } } - encodeIndent-- out.Write(s.tagBytes) encodeLength(out, inner.Len()) out.Write(inner.Bytes()) @@ -47,8 +42,8 @@ func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error { if err = encodeLength(out, p.length); err != nil { return err } - //fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) - //fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) + // fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) + // fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) out.Write(p.content) return nil @@ -58,7 +53,7 @@ func ber2der(ber []byte) ([]byte, error) { if len(ber) == 0 { return nil, errors.New("ber2der: input ber is empty") } - //fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) + // fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) out := new(bytes.Buffer) obj, _, err := readObject(ber, 0) @@ -69,7 +64,7 @@ func ber2der(ber []byte) ([]byte, error) { // if offset < len(ber) { // return nil, fmt.Errorf("ber2der: Content longer than expected. 
Got %d, expected %d", offset, len(ber)) - //} + // } return out.Bytes(), nil } @@ -154,7 +149,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { } } // jvehent 20170227: this doesn't appear to be used anywhere... - //tag = tag*128 + ber[offset] - 0x80 + // tag = tag*128 + ber[offset] - 0x80 offset++ if offset > berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") @@ -204,7 +199,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { if length < 0 { return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length") } - //fmt.Printf("--> length : %d\n", length) + // fmt.Printf("--> length : %d\n", length) contentEnd := offset + length if contentEnd > len(ber) { return nil, 0, errors.New("ber2der: BER tag length is more than available data") @@ -259,7 +254,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { } func isIndefiniteTermination(ber []byte, offset int) (bool, error) { - if len(ber) - offset < 2 { + if len(ber)-offset < 2 { return false, errors.New("ber2der: Invalid BER format") } @@ -267,5 +262,5 @@ func isIndefiniteTermination(ber []byte, offset int) (bool, error) { } func debugprint(format string, a ...interface{}) { - //fmt.Printf(format, a) + // fmt.Printf(format, a) } diff --git a/vendor/go.mozilla.org/pkcs7/decrypt.go b/vendor/github.com/smallstep/pkcs7/decrypt.go similarity index 62% rename from vendor/go.mozilla.org/pkcs7/decrypt.go rename to vendor/github.com/smallstep/pkcs7/decrypt.go index 0d088d62..76dc17f7 100644 --- a/vendor/go.mozilla.org/pkcs7/decrypt.go +++ b/vendor/github.com/smallstep/pkcs7/decrypt.go @@ -9,6 +9,7 @@ import ( "crypto/rand" "crypto/rsa" "crypto/x509" + "crypto/x509/pkix" "encoding/asn1" "errors" "fmt" @@ -17,6 +18,12 @@ import ( // ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported") +// ErrUnsupportedAsymmetricEncryptionAlgorithm is returned when attempting to use an unknown asymmetric encryption algorithm +var ErrUnsupportedAsymmetricEncryptionAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA PKCS#1 v1.5 and RSA OAEP are supported") + +// ErrUnsupportedKeyType is returned when attempting to encrypting keys using a key that's not an RSA key +var ErrUnsupportedKeyType = errors.New("pkcs7: only RSA keys are supported") + // ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type") @@ -31,9 +38,21 @@ func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte return nil, errors.New("pkcs7: no enveloped recipient for provided certificate") } switch pkey := pkey.(type) { - case *rsa.PrivateKey: - var contentKey []byte - contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey) + case crypto.Decrypter: + var opts crypto.DecrypterOpts + switch algorithm := recipient.KeyEncryptionAlgorithm.Algorithm; { + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + hashFunc, err := getHashFuncForKeyEncryptionAlgorithm(recipient.KeyEncryptionAlgorithm) + if err != nil { + return nil, err + } + opts = &rsa.OAEPOptions{Hash: hashFunc} + case algorithm.Equal(OIDEncryptionAlgorithmRSA): + opts = &rsa.PKCS1v15DecryptOptions{} + default: + return nil, ErrUnsupportedAsymmetricEncryptionAlgorithm + } + contentKey, 
err := pkey.Decrypt(rand.Reader, recipient.EncryptedKey, opts) if err != nil { return nil, err } @@ -42,6 +61,44 @@ func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte return nil, ErrUnsupportedAlgorithm } +// RFC 4055, 4.1 +// The current ASN.1 parser does not support non-integer defaults so the 'default:' tags here do nothing. +type rsaOAEPAlgorithmParameters struct { + HashFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"` + MaskGenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"` + PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"` +} + +func getHashFuncForKeyEncryptionAlgorithm(keyEncryptionAlgorithm pkix.AlgorithmIdentifier) (crypto.Hash, error) { + invalidHashFunc := crypto.Hash(0) + params := &rsaOAEPAlgorithmParameters{ + HashFunc: pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA1}, // set default hash algorithm to SHA1 + } + var rest []byte + rest, err := asn1.Unmarshal(keyEncryptionAlgorithm.Parameters.FullBytes, params) + if err != nil { + return invalidHashFunc, fmt.Errorf("pkcs7: failed unmarshaling key encryption algorithm parameters: %v", err) + } + if len(rest) != 0 { + return invalidHashFunc, errors.New("pkcs7: trailing data after RSA OAEP parameters") + } + + switch { + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return crypto.SHA1, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA224): + return crypto.SHA224, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return crypto.SHA256, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return crypto.SHA384, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return crypto.SHA512, nil + default: + return invalidHashFunc, errors.New("pkcs7: unsupported hash function for RSA OAEP") + } +} + // DecryptUsingPSK decrypts encrypted data using caller provided // pre-shared secret func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) { @@ -60,7 +117,6 @@ func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { !alg.Equal(OIDEncryptionAlgorithmAES128CBC) && !alg.Equal(OIDEncryptionAlgorithmAES128GCM) && !alg.Equal(OIDEncryptionAlgorithmAES256GCM) { - fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg) return nil, ErrUnsupportedAlgorithm } @@ -147,10 +203,10 @@ func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { func unpad(data []byte, blocklen int) ([]byte, error) { if blocklen < 1 { - return nil, fmt.Errorf("invalid blocklen %d", blocklen) + return nil, fmt.Errorf("pkcs7: invalid blocklen %d", blocklen) } if len(data)%blocklen != 0 || len(data) == 0 { - return nil, fmt.Errorf("invalid data len %d", len(data)) + return nil, fmt.Errorf("pkcs7: invalid data len %d", len(data)) } // the last byte is the length of padding @@ -160,7 +216,7 @@ func unpad(data []byte, blocklen int) ([]byte, error) { pad := data[len(data)-padlen:] for _, padbyte := range pad { if padbyte != byte(padlen) { - return nil, errors.New("invalid padding") + return nil, errors.New("pkcs7: invalid padding") } } diff --git a/vendor/go.mozilla.org/pkcs7/encrypt.go b/vendor/github.com/smallstep/pkcs7/encrypt.go similarity index 74% rename from vendor/go.mozilla.org/pkcs7/encrypt.go rename to vendor/github.com/smallstep/pkcs7/encrypt.go index 6b265570..a5c96e75 100644 --- a/vendor/go.mozilla.org/pkcs7/encrypt.go +++ b/vendor/github.com/smallstep/pkcs7/encrypt.go 
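On the decrypt side shown above, any key implementing crypto.Decrypter now works; a minimal caller sketch (`enveloped`, `recipientCert`, and `rsaKey` are illustrative names, not from this patch):

```go
p7, err := pkcs7.Parse(enveloped)
if err != nil {
	// handle err
}
plain, err := p7.Decrypt(recipientCert, rsaKey) // PKCS#1 v1.5 or RSA-OAEP, per the recipient info
```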
@@ -2,6 +2,7 @@ package pkcs7 import ( "bytes" + "crypto" "crypto/aes" "crypto/cipher" "crypto/des" @@ -66,6 +67,24 @@ var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC // content with an unsupported algorithm. var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported") +// KeyEncryptionAlgorithm determines the algorithm used to encrypt a +// content key. Change the value of this variable to change which +// algorithm is used in the Encrypt() function. +var KeyEncryptionAlgorithm = OIDEncryptionAlgorithmRSA + +// ErrUnsupportedKeyEncryptionAlgorithm is returned when an +// unsupported key encryption algorithm OID is provided. +var ErrUnsupportedKeyEncryptionAlgorithm = errors.New("pkcs7: unsupported key encryption algorithm provided") + +// KeyEncryptionHash determines the crypto.Hash algorithm to use +// when encrypting a content key. Change the value of this variable +// to change which algorithm is used in the Encrypt() function. +var KeyEncryptionHash = crypto.SHA256 + +// ErrUnsupportedKeyEncryptionHash is returned when an +// unsupported key encryption hash is provided. +var ErrUnsupportedKeyEncryptionHash = errors.New("pkcs7: unsupported key encryption hash provided") + // ErrPSKNotProvided is returned when attempting to encrypt // using a PSK without actually providing the PSK. var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided") @@ -256,7 +275,7 @@ func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, e // value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the // value before calling Encrypt(). For example: // -// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES256GCM // // TODO(fullsailor): Add support for encrypting content with other algorithms func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { @@ -288,7 +307,27 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { // Prepare each recipient's encrypted cipher key recipientInfos := make([]recipientInfo, len(recipients)) for i, recipient := range recipients { - encrypted, err := encryptKey(key, recipient) + algorithm := KeyEncryptionAlgorithm + hash := KeyEncryptionHash + var kea pkix.AlgorithmIdentifier + switch { + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + parameters, err := getParametersForKeyEncryptionAlgorithm(algorithm, hash) + if err != nil { + return nil, fmt.Errorf("failed to get parameters for key encryption: %v", err) + } + kea = pkix.AlgorithmIdentifier{ + Algorithm: algorithm, + Parameters: parameters, + } + case algorithm.Equal(OIDEncryptionAlgorithmRSA): + kea = pkix.AlgorithmIdentifier{ + Algorithm: algorithm, + } + default: + return nil, ErrUnsupportedKeyEncryptionAlgorithm + } + encrypted, err := encryptKey(key, recipient, algorithm, hash) if err != nil { return nil, err } @@ -297,12 +336,10 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { return nil, err } info := recipientInfo{ - Version: 0, - IssuerAndSerialNumber: ias, - KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: OIDEncryptionAlgorithmRSA, - }, - EncryptedKey: encrypted, + Version: 0, + IssuerAndSerialNumber: ias, + KeyEncryptionAlgorithm: kea, + EncryptedKey: encrypted, } recipientInfos[i] = info } @@ -327,6 +364,37 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { return 
asn1.Marshal(wrapper) } +func getParametersForKeyEncryptionAlgorithm(algorithm asn1.ObjectIdentifier, hash crypto.Hash) (asn1.RawValue, error) { + if !algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP) { + return asn1.RawValue{}, nil // return empty; not used + } + + params := rsaOAEPAlgorithmParameters{} + switch hash { + case crypto.SHA1: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA1} + case crypto.SHA224: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA224} + case crypto.SHA256: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA256} + case crypto.SHA384: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA384} + case crypto.SHA512: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA512} + default: + return asn1.RawValue{}, ErrUnsupportedAlgorithm + } + + b, err := asn1.Marshal(params) + if err != nil { + return asn1.RawValue{}, fmt.Errorf("failed marshaling key encryption parameters: %v", err) + } + + return asn1.RawValue{ + FullBytes: b, + }, nil +} + // EncryptUsingPSK creates and returns an encrypted data PKCS7 structure, // encrypted using caller provided pre-shared secret. func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { @@ -375,15 +443,23 @@ func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { } func marshalEncryptedContent(content []byte) asn1.RawValue { - asn1Content, _ := asn1.Marshal(content) - return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true} + return asn1.RawValue{Bytes: content, Class: 2, IsCompound: false} } -func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) { - if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil { +func encryptKey(key []byte, recipient *x509.Certificate, algorithm asn1.ObjectIdentifier, hash crypto.Hash) ([]byte, error) { + pub, ok := recipient.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, ErrUnsupportedKeyType + } + + switch { + case algorithm.Equal(OIDEncryptionAlgorithmRSA): return rsa.EncryptPKCS1v15(rand.Reader, pub, key) + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + return rsa.EncryptOAEP(hash.New(), rand.Reader, pub, key, nil) + default: + return nil, ErrUnsupportedKeyEncryptionAlgorithm } - return nil, ErrUnsupportedAlgorithm } func pad(data []byte, blocklen int) ([]byte, error) { diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go new file mode 100644 index 00000000..378cc265 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go @@ -0,0 +1,14 @@ +package legacyx509 + +import "fmt" + +// legacyGodebugSetting is a type mimicking Go's internal godebug package +// settings, which are used to enable / disable certain functionalities at +// build time. +type legacyGodebugSetting int + +func (s legacyGodebugSetting) Value() string { + return fmt.Sprintf("%d", s) +} + +func (s legacyGodebugSetting) IncNonDefault() {} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go new file mode 100644 index 00000000..7d1469b6 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go @@ -0,0 +1,14 @@ +/* +Package legacyx509 is a copy of certain parts of Go's crypto/x509 package. +It is based on Go 1.23, and has just the parts copied over required for +parsing X509 certificates. 
+ +The primary reason this copy exists is to keep support for parsing PKCS7 +messages containing Simple Certificate Enrolment Protocol (SCEP) requests +from Windows devices. Go 1.23 made a change marking certificates with a +critical authority key identifier as invalid, which is mandated by RFC 5280, +but apparently Windows marks those specific certificates as such, resulting +in those SCEP requests failing from being parsed correctly. +*/ + +package legacyx509 diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go new file mode 100644 index 00000000..8268a07c --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go @@ -0,0 +1,377 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package legacyx509 + +import ( + "bytes" + "encoding/asn1" + "errors" + "math" + "math/big" + "math/bits" + "strconv" + "strings" +) + +var ( + errInvalidOID = errors.New("invalid oid") +) + +// An OID represents an ASN.1 OBJECT IDENTIFIER. +type OID struct { + der []byte +} + +// ParseOID parses a Object Identifier string, represented by ASCII numbers separated by dots. +func ParseOID(oid string) (OID, error) { + var o OID + return o, o.unmarshalOIDText(oid) +} + +func newOIDFromDER(der []byte) (OID, bool) { + if len(der) == 0 || der[len(der)-1]&0x80 != 0 { + return OID{}, false + } + + start := 0 + for i, v := range der { + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == start && v == 0x80 { + return OID{}, false + } + if v&0x80 == 0 { + start = i + 1 + } + } + + return OID{der}, true +} + +// OIDFromInts creates a new OID using ints, each integer is a separate component. 
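+//
+// For example (sketch):
+//
+//	oid, err := OIDFromInts([]uint64{1, 2, 840, 113549})
+//	// err is non-nil for invalid inputs, e.g. fewer than two components
+//	// or a first component greater than 2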
+func OIDFromInts(oid []uint64) (OID, error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return OID{}, errInvalidOID + } + + length := base128IntLength(oid[0]*40 + oid[1]) + for _, v := range oid[2:] { + length += base128IntLength(v) + } + + der := make([]byte, 0, length) + der = appendBase128Int(der, oid[0]*40+oid[1]) + for _, v := range oid[2:] { + der = appendBase128Int(der, v) + } + return OID{der}, nil +} + +func base128IntLength(n uint64) int { + if n == 0 { + return 1 + } + return (bits.Len64(n) + 6) / 7 +} + +func appendBase128Int(dst []byte, n uint64) []byte { + for i := base128IntLength(n) - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + dst = append(dst, o) + } + return dst +} + +func base128BigIntLength(n *big.Int) int { + if n.Cmp(big.NewInt(0)) == 0 { + return 1 + } + return (n.BitLen() + 6) / 7 +} + +func appendBase128BigInt(dst []byte, n *big.Int) []byte { + if n.Cmp(big.NewInt(0)) == 0 { + return append(dst, 0) + } + + for i := base128BigIntLength(n) - 1; i >= 0; i-- { + o := byte(big.NewInt(0).Rsh(n, uint(i)*7).Bits()[0]) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + dst = append(dst, o) + } + return dst +} + +// AppendText implements [encoding.TextAppender] +func (o OID) AppendText(b []byte) ([]byte, error) { + return append(b, o.String()...), nil +} + +// MarshalText implements [encoding.TextMarshaler] +func (o OID) MarshalText() ([]byte, error) { + return o.AppendText(nil) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] +func (o *OID) UnmarshalText(text []byte) error { + return o.unmarshalOIDText(string(text)) +} + +// cutString slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cutString returns s, "", false. +func cutString(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (o *OID) unmarshalOIDText(oid string) error { + // (*big.Int).SetString allows +/- signs, but we don't want + // to allow them in the string representation of Object Identifier, so + // reject such encodings. + for _, c := range oid { + isDigit := c >= '0' && c <= '9' + if !isDigit && c != '.'
{ + return errInvalidOID + } + } + + var ( + firstNum string + secondNum string + ) + + var nextComponentExists bool + firstNum, oid, nextComponentExists = cutString(oid, ".") + if !nextComponentExists { + return errInvalidOID + } + secondNum, oid, nextComponentExists = cutString(oid, ".") + + var ( + first = big.NewInt(0) + second = big.NewInt(0) + ) + + if _, ok := first.SetString(firstNum, 10); !ok { + return errInvalidOID + } + if _, ok := second.SetString(secondNum, 10); !ok { + return errInvalidOID + } + + if first.Cmp(big.NewInt(2)) > 0 || (first.Cmp(big.NewInt(2)) < 0 && second.Cmp(big.NewInt(40)) >= 0) { + return errInvalidOID + } + + firstComponent := first.Mul(first, big.NewInt(40)) + firstComponent.Add(firstComponent, second) + + der := appendBase128BigInt(make([]byte, 0, 32), firstComponent) + + for nextComponentExists { + var strNum string + strNum, oid, nextComponentExists = cutString(oid, ".") + b, ok := big.NewInt(0).SetString(strNum, 10) + if !ok { + return errInvalidOID + } + der = appendBase128BigInt(der, b) + } + + o.der = der + return nil +} + +// AppendBinary implements [encoding.BinaryAppender] +func (o OID) AppendBinary(b []byte) ([]byte, error) { + return append(b, o.der...), nil +} + +// MarshalBinary implements [encoding.BinaryMarshaler] +func (o OID) MarshalBinary() ([]byte, error) { + return o.AppendBinary(nil) +} + +// cloneBytes returns a copy of b[:len(b)]. +// The result may have additional unused capacity. +// cloneBytes(nil) returns nil. +func cloneBytes(b []byte) []byte { + if b == nil { + return nil + } + return append([]byte{}, b...) +} + +// UnmarshalBinary implements [encoding.BinaryUnmarshaler] +func (o *OID) UnmarshalBinary(b []byte) error { + oid, ok := newOIDFromDER(cloneBytes(b)) + if !ok { + return errInvalidOID + } + *o = oid + return nil +} + +// Equal returns true when oid and other represent the same Object Identifier. +func (oid OID) Equal(other OID) bool { + // There is only one possible DER encoding of + // each unique Object Identifier. + return bytes.Equal(oid.der, other.der) +} + +func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, failed bool) { + offset = initOffset + var ret64 int64 + for shifted := 0; offset < len(bytes); shifted++ { + // 5 * 7 bits per byte == 35 bits of data + // Thus the representation is either non-minimal or too large for an int32 + if shifted == 5 { + failed = true + return + } + ret64 <<= 7 + b := bytes[offset] + // integers should be minimally encoded, so the leading octet should + // never be 0x80 + if shifted == 0 && b == 0x80 { + failed = true + return + } + ret64 |= int64(b & 0x7f) + offset++ + if b&0x80 == 0 { + ret = int(ret64) + // Ensure that the returned value fits in an int on all platforms + if ret64 > math.MaxInt32 { + failed = true + } + return + } + } + failed = true + return +} + +// EqualASN1OID returns whether an OID equals an asn1.ObjectIdentifier. If +// asn1.ObjectIdentifier cannot represent the OID specified by oid, because +// a component of OID requires more than 31 bits, it returns false. +func (oid OID) EqualASN1OID(other asn1.ObjectIdentifier) bool { + if len(other) < 2 { + return false + } + v, offset, failed := parseBase128Int(oid.der, 0) + if failed { + // This should never happen, since we've already parsed the OID, + // but just in case.
+ return false + } + if v < 80 { + a, b := v/40, v%40 + if other[0] != a || other[1] != b { + return false + } + } else { + a, b := 2, v-80 + if other[0] != a || other[1] != b { + return false + } + } + + i := 2 + for ; offset < len(oid.der); i++ { + v, offset, failed = parseBase128Int(oid.der, offset) + if failed { + // Again, shouldn't happen, since we've already parsed + // the OID, but better safe than sorry. + return false + } + if i >= len(other) || v != other[i] { + return false + } + } + + return i == len(other) +} + +// String returns the string representation of the Object Identifier. +func (oid OID) String() string { + var b strings.Builder + b.Grow(32) + const ( + valSize = 64 // size in bits of val. + bitsPerByte = 7 + maxValSafeShift = (1 << (valSize - bitsPerByte)) - 1 + ) + var ( + start = 0 + val = uint64(0) + numBuf = make([]byte, 0, 21) + bigVal *big.Int + overflow bool + ) + for i, v := range oid.der { + curVal := v & 0x7F + valEnd := v&0x80 == 0 + if valEnd { + if start != 0 { + b.WriteByte('.') + } + } + if !overflow && val > maxValSafeShift { + if bigVal == nil { + bigVal = new(big.Int) + } + bigVal = bigVal.SetUint64(val) + overflow = true + } + if overflow { + bigVal = bigVal.Lsh(bigVal, bitsPerByte).Or(bigVal, big.NewInt(int64(curVal))) + if valEnd { + if start == 0 { + b.WriteString("2.") + bigVal = bigVal.Sub(bigVal, big.NewInt(80)) + } + numBuf = bigVal.Append(numBuf, 10) + b.Write(numBuf) + numBuf = numBuf[:0] + val = 0 + start = i + 1 + overflow = false + } + continue + } + val <<= bitsPerByte + val |= uint64(curVal) + if valEnd { + if start == 0 { + if val < 80 { + b.Write(strconv.AppendUint(numBuf, val/40, 10)) + b.WriteByte('.') + b.Write(strconv.AppendUint(numBuf, val%40, 10)) + } else { + b.WriteString("2.") + b.Write(strconv.AppendUint(numBuf, val-80, 10)) + } + } else { + b.Write(strconv.AppendUint(numBuf, val, 10)) + } + val = 0 + start = i + 1 + } + } + return b.String() +} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go new file mode 100644 index 00000000..ec57e79f --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go @@ -0,0 +1,1027 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package legacyx509 + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf16" + "unicode/utf8" + + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + stdx509 "crypto/x509" +) + +// ParseCertificates parses one or more certificates from the given ASN.1 DER +// data. The certificates must be concatenated with no intermediate padding. +func ParseCertificates(der []byte) ([]*stdx509.Certificate, error) { + var certs []*stdx509.Certificate + for len(der) > 0 { + cert, err := parseCertificate(der) + if err != nil { + return nil, err + } + certs = append(certs, cert) + der = der[len(cert.Raw):] + } + return certs, nil +} + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +// This is a simplified version of encoding/asn1.isPrintable.
+func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' || + // This is not technically allowed either. However, not + // only is it relatively common, but there are also a + // handful of CA certificates that contain it. At least + // one of which will not expire until 2027. + b == '&' +} + +// parseASN1String parses the ASN.1 string types T61String, PrintableString, +// UTF8String, BMPString, IA5String, and NumericString. This is mostly copied +// from the respective encoding/asn1.parse... methods, rather than just +// increasing the API surface of that package. +func parseASN1String(tag cryptobyte_asn1.Tag, value []byte) (string, error) { + switch tag { + case cryptobyte_asn1.T61String: + return string(value), nil + case cryptobyte_asn1.PrintableString: + for _, b := range value { + if !isPrintable(b) { + return "", errors.New("invalid PrintableString") + } + } + return string(value), nil + case cryptobyte_asn1.UTF8String: + if !utf8.Valid(value) { + return "", errors.New("invalid UTF-8 string") + } + return string(value), nil + case cryptobyte_asn1.Tag(asn1.TagBMPString): + if len(value)%2 != 0 { + return "", errors.New("invalid BMPString") + } + + // Strip terminator if present. + if l := len(value); l >= 2 && value[l-1] == 0 && value[l-2] == 0 { + value = value[:l-2] + } + + s := make([]uint16, 0, len(value)/2) + for len(value) > 0 { + s = append(s, uint16(value[0])<<8+uint16(value[1])) + value = value[2:] + } + + return string(utf16.Decode(s)), nil + case cryptobyte_asn1.IA5String: + s := string(value) + if isIA5String(s) != nil { + return "", errors.New("invalid IA5String") + } + return s, nil + case cryptobyte_asn1.Tag(asn1.TagNumericString): + for _, b := range value { + if !('0' <= b && b <= '9' || b == ' ') { + return "", errors.New("invalid NumericString") + } + } + return string(value), nil + } + return "", fmt.Errorf("unsupported string type: %v", tag) +} + +// parseName parses a DER encoded Name as defined in RFC 5280. We may +// want to export this function in the future for use in crypto/tls. 
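+// For reference (RFC 5280, Appendix A.1), the ASN.1 structure walked by
+// parseName below is:
+//
+//	Name ::= CHOICE { rdnSequence RDNSequence }
+//	RDNSequence ::= SEQUENCE OF RelativeDistinguishedName
+//	RelativeDistinguishedName ::= SET SIZE (1..MAX) OF AttributeTypeAndValue
+//	AttributeTypeAndValue ::= SEQUENCE { type AttributeType, value AttributeValue }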
+func parseName(raw cryptobyte.String) (*pkix.RDNSequence, error) { + if !raw.ReadASN1(&raw, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RDNSequence") + } + + var rdnSeq pkix.RDNSequence + for !raw.Empty() { + var rdnSet pkix.RelativeDistinguishedNameSET + var set cryptobyte.String + if !raw.ReadASN1(&set, cryptobyte_asn1.SET) { + return nil, errors.New("x509: invalid RDNSequence") + } + for !set.Empty() { + var atav cryptobyte.String + if !set.ReadASN1(&atav, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute") + } + var attr pkix.AttributeTypeAndValue + if !atav.ReadASN1ObjectIdentifier(&attr.Type) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute type") + } + var rawValue cryptobyte.String + var valueTag cryptobyte_asn1.Tag + if !atav.ReadAnyASN1(&rawValue, &valueTag) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute value") + } + var err error + attr.Value, err = parseASN1String(valueTag, rawValue) + if err != nil { + return nil, fmt.Errorf("x509: invalid RDNSequence: invalid attribute value: %s", err) + } + rdnSet = append(rdnSet, attr) + } + + rdnSeq = append(rdnSeq, rdnSet) + } + + return &rdnSeq, nil +} + +func parseAI(der cryptobyte.String) (pkix.AlgorithmIdentifier, error) { + ai := pkix.AlgorithmIdentifier{} + if !der.ReadASN1ObjectIdentifier(&ai.Algorithm) { + return ai, errors.New("x509: malformed OID") + } + if der.Empty() { + return ai, nil + } + var params cryptobyte.String + var tag cryptobyte_asn1.Tag + if !der.ReadAnyASN1Element(¶ms, &tag) { + return ai, errors.New("x509: malformed parameters") + } + ai.Parameters.Tag = int(tag) + ai.Parameters.FullBytes = params + return ai, nil +} + +func parseTime(der *cryptobyte.String) (time.Time, error) { + var t time.Time + switch { + case der.PeekASN1Tag(cryptobyte_asn1.UTCTime): + if !der.ReadASN1UTCTime(&t) { + return t, errors.New("x509: malformed UTCTime") + } + case der.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime): + if !der.ReadASN1GeneralizedTime(&t) { + return t, errors.New("x509: malformed GeneralizedTime") + } + default: + return t, errors.New("x509: unsupported time format") + } + return t, nil +} + +func parseValidity(der cryptobyte.String) (time.Time, time.Time, error) { + notBefore, err := parseTime(&der) + if err != nil { + return time.Time{}, time.Time{}, err + } + notAfter, err := parseTime(&der) + if err != nil { + return time.Time{}, time.Time{}, err + } + + return notBefore, notAfter, nil +} + +func parseExtension(der cryptobyte.String) (pkix.Extension, error) { + var ext pkix.Extension + if !der.ReadASN1ObjectIdentifier(&ext.Id) { + return ext, errors.New("x509: malformed extension OID field") + } + if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) { + if !der.ReadASN1Boolean(&ext.Critical) { + return ext, errors.New("x509: malformed extension critical field") + } + } + var val cryptobyte.String + if !der.ReadASN1(&val, cryptobyte_asn1.OCTET_STRING) { + return ext, errors.New("x509: malformed extension value field") + } + ext.Value = val + return ext, nil +} + +func parsePublicKey(keyData *publicKeyInfo) (interface{}, error) { + oid := keyData.Algorithm.Algorithm + params := keyData.Algorithm.Parameters + der := cryptobyte.String(keyData.PublicKey.RightAlign()) + switch { + case oid.Equal(oidPublicKeyRSA): + // RSA public keys must have a NULL in the parameters. + // See RFC 3279, Section 2.3.1. 
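+ // (asn1.NullBytes is the two-byte DER encoding of ASN.1 NULL, 0x05 0x00.)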
+ if !bytes.Equal(params.FullBytes, asn1.NullBytes) { + return nil, errors.New("x509: RSA key missing NULL parameters") + } + + p := &pkcs1PublicKey{N: new(big.Int)} + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RSA public key") + } + if !der.ReadASN1Integer(p.N) { + return nil, errors.New("x509: invalid RSA modulus") + } + if !der.ReadASN1Integer(&p.E) { + return nil, errors.New("x509: invalid RSA public exponent") + } + + if p.N.Sign() <= 0 { + return nil, errors.New("x509: RSA modulus is not a positive number") + } + if p.E <= 0 { + return nil, errors.New("x509: RSA public exponent is not a positive number") + } + + pub := &rsa.PublicKey{ + E: p.E, + N: p.N, + } + return pub, nil + case oid.Equal(oidPublicKeyECDSA): + paramsDer := cryptobyte.String(params.FullBytes) + namedCurveOID := new(asn1.ObjectIdentifier) + if !paramsDer.ReadASN1ObjectIdentifier(namedCurveOID) { + return nil, errors.New("x509: invalid ECDSA parameters") + } + namedCurve := namedCurveFromOID(*namedCurveOID) + if namedCurve == nil { + return nil, errors.New("x509: unsupported elliptic curve") + } + x, y := elliptic.Unmarshal(namedCurve, der) + if x == nil { + return nil, errors.New("x509: failed to unmarshal elliptic curve point") + } + pub := &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + } + return pub, nil + case oid.Equal(oidPublicKeyEd25519): + // RFC 8410, Section 3 + // > For all of the OIDs, the parameters MUST be absent. + if len(params.FullBytes) != 0 { + return nil, errors.New("x509: Ed25519 key encoded with illegal parameters") + } + if len(der) != ed25519.PublicKeySize { + return nil, errors.New("x509: wrong Ed25519 public key size") + } + return ed25519.PublicKey(der), nil + // case oid.Equal(oidPublicKeyX25519): + // // RFC 8410, Section 3 + // // > For all of the OIDs, the parameters MUST be absent. 
+ // if len(params.FullBytes) != 0 { + // return nil, errors.New("x509: X25519 key encoded with illegal parameters") + // } + // return ecdh.X25519().NewPublicKey(der) + case oid.Equal(oidPublicKeyDSA): + y := new(big.Int) + if !der.ReadASN1Integer(y) { + return nil, errors.New("x509: invalid DSA public key") + } + pub := &dsa.PublicKey{ + Y: y, + Parameters: dsa.Parameters{ + P: new(big.Int), + Q: new(big.Int), + G: new(big.Int), + }, + } + paramsDer := cryptobyte.String(params.FullBytes) + if !paramsDer.ReadASN1(¶msDer, cryptobyte_asn1.SEQUENCE) || + !paramsDer.ReadASN1Integer(pub.Parameters.P) || + !paramsDer.ReadASN1Integer(pub.Parameters.Q) || + !paramsDer.ReadASN1Integer(pub.Parameters.G) { + return nil, errors.New("x509: invalid DSA parameters") + } + if pub.Y.Sign() <= 0 || pub.Parameters.P.Sign() <= 0 || + pub.Parameters.Q.Sign() <= 0 || pub.Parameters.G.Sign() <= 0 { + return nil, errors.New("x509: zero or negative DSA parameter") + } + return pub, nil + default: + return nil, errors.New("x509: unknown public key algorithm") + } +} + +func parseKeyUsageExtension(der cryptobyte.String) (stdx509.KeyUsage, error) { + var usageBits asn1.BitString + if !der.ReadASN1BitString(&usageBits) { + return 0, errors.New("x509: invalid key usage") + } + + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + return stdx509.KeyUsage(usage), nil +} + +func parseBasicConstraintsExtension(der cryptobyte.String) (bool, int, error) { + var isCA bool + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return false, 0, errors.New("x509: invalid basic constraints") + } + if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) { + if !der.ReadASN1Boolean(&isCA) { + return false, 0, errors.New("x509: invalid basic constraints") + } + } + maxPathLen := -1 + if der.PeekASN1Tag(cryptobyte_asn1.INTEGER) { + if !der.ReadASN1Integer(&maxPathLen) { + return false, 0, errors.New("x509: invalid basic constraints") + } + } + + // TODO: map out.MaxPathLen to 0 if it has the -1 default value? 
(Issue 19285) + return isCA, maxPathLen, nil +} + +func forEachSAN(der cryptobyte.String, callback func(tag int, data []byte) error) error { + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid subject alternative names") + } + for !der.Empty() { + var san cryptobyte.String + var tag cryptobyte_asn1.Tag + if !der.ReadAnyASN1(&san, &tag) { + return errors.New("x509: invalid subject alternative name") + } + if err := callback(int(tag^0x80), san); err != nil { + return err + } + } + + return nil +} + +func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) { + err = forEachSAN(der, func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + email := string(data) + if err := isIA5String(email); err != nil { + return errors.New("x509: SAN rfc822Name is malformed") + } + emailAddresses = append(emailAddresses, email) + case nameTypeDNS: + name := string(data) + if err := isIA5String(name); err != nil { + return errors.New("x509: SAN dNSName is malformed") + } + dnsNames = append(dnsNames, string(name)) + case nameTypeURI: + uriStr := string(data) + if err := isIA5String(uriStr); err != nil { + return errors.New("x509: SAN uniformResourceIdentifier is malformed") + } + uri, err := url.Parse(uriStr) + if err != nil { + return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) + } + if len(uri.Host) > 0 { + if _, ok := domainToReverseLabels(uri.Host); !ok { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) + } + } + uris = append(uris, uri) + case nameTypeIP: + switch len(data) { + case net.IPv4len, net.IPv6len: + ipAddresses = append(ipAddresses, data) + default: + return errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data))) + } + } + + return nil + }) + + return +} + +func parseAuthorityKeyIdentifier(e pkix.Extension) ([]byte, error) { + // RFC 5280, Section 4.2.1.1 + // if e.Critical { + // // Conforming CAs MUST mark this extension as non-critical + // return nil, errors.New("x509: authority key identifier incorrectly marked critical") + // } + val := cryptobyte.String(e.Value) + var akid cryptobyte.String + if !val.ReadASN1(&akid, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid authority key identifier") + } + if akid.PeekASN1Tag(cryptobyte_asn1.Tag(0).ContextSpecific()) { + if !akid.ReadASN1(&akid, cryptobyte_asn1.Tag(0).ContextSpecific()) { + return nil, errors.New("x509: invalid authority key identifier") + } + return akid, nil + } + return nil, nil +} + +func parseExtKeyUsageExtension(der cryptobyte.String) ([]stdx509.ExtKeyUsage, []asn1.ObjectIdentifier, error) { + var extKeyUsages []stdx509.ExtKeyUsage + var unknownUsages []asn1.ObjectIdentifier + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, nil, errors.New("x509: invalid extended key usages") + } + for !der.Empty() { + var eku asn1.ObjectIdentifier + if !der.ReadASN1ObjectIdentifier(&eku) { + return nil, nil, errors.New("x509: invalid extended key usages") + } + if extKeyUsage, ok := extKeyUsageFromOID(eku); ok { + extKeyUsages = append(extKeyUsages, stdx509.ExtKeyUsage(extKeyUsage)) + } else { + unknownUsages = append(unknownUsages, eku) + } + } + return extKeyUsages, unknownUsages, nil +} + +// func parseCertificatePoliciesExtension(der cryptobyte.String) ([]OID, error) { +// var oids []OID +// if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { +// return nil, errors.New("x509: invalid certificate policies") +// } +// 
for !der.Empty() { +// var cp cryptobyte.String +// var OIDBytes cryptobyte.String +// if !der.ReadASN1(&cp, cryptobyte_asn1.SEQUENCE) || !cp.ReadASN1(&OIDBytes, cryptobyte_asn1.OBJECT_IDENTIFIER) { +// return nil, errors.New("x509: invalid certificate policies") +// } +// oid, ok := newOIDFromDER(OIDBytes) +// if !ok { +// return nil, errors.New("x509: invalid certificate policies") +// } +// oids = append(oids, oid) +// } +// return oids, nil +// } + +// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits. +func isValidIPMask(mask []byte) bool { + seenZero := false + + for _, b := range mask { + if seenZero { + if b != 0 { + return false + } + + continue + } + + switch b { + case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe: + seenZero = true + case 0xff: + default: + return false + } + } + + return true +} + +func parseNameConstraintsExtension(out *stdx509.Certificate, e pkix.Extension) (unhandled bool, err error) { + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + outer := cryptobyte.String(e.Value) + var toplevel, permitted, excluded cryptobyte.String + var havePermitted, haveExcluded bool + if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) || + !outer.Empty() || + !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) || + !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) || + !toplevel.Empty() { + return false, errors.New("x509: invalid NameConstraints extension") + } + + if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 { + // From RFC 5280, Section 4.2.1.10: + // “either the permittedSubtrees field + // or the excludedSubtrees MUST be + // present” + return false, errors.New("x509: empty name constraints extension") + } + + getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) { + for !subtrees.Empty() { + var seq, value cryptobyte.String + var tag cryptobyte_asn1.Tag + if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) || + !seq.ReadAnyASN1(&value, &tag) { + return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension") + } + + var ( + dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific() + emailTag = cryptobyte_asn1.Tag(1).ContextSpecific() + ipTag = cryptobyte_asn1.Tag(7).ContextSpecific() + uriTag = cryptobyte_asn1.Tag(6).ContextSpecific() + ) + + switch tag { + case dnsTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain + // itself, but that's not valid in a + // normal domain name. 
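+ // (Illustrative note: a dNSName constraint of ".example.com" is
+ // trimmed and validated here as "example.com", but stored with
+ // its leading period intact.)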
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) + } + dnsNames = append(dnsNames, domain) + + case ipTag: + l := len(value) + var ip, mask []byte + + switch l { + case 8: + ip = value[:4] + mask = value[4:] + + case 32: + ip = value[:16] + mask = value[16:] + + default: + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l) + } + + if !isValidIPMask(mask) { + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask) + } + + ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)}) + + case emailTag: + constraint := string(value) + if err := isIA5String(constraint); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + // If the constraint contains an @ then + // it specifies an exact mailbox name. + if strings.Contains(constraint, "@") { + if _, ok := parseRFC2821Mailbox(constraint); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) + } + } else { + // Otherwise it's a domain name. + domain := constraint + if len(domain) > 0 && domain[0] == '.' { + domain = domain[1:] + } + if _, ok := domainToReverseLabels(domain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) + } + } + emails = append(emails, constraint) + + case uriTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + if net.ParseIP(domain) != nil { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain itself, + // but that's not valid in a normal + // domain name. 
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) + } + uriDomains = append(uriDomains, domain) + + default: + unhandled = true + } + } + + return dnsNames, ips, emails, uriDomains, nil + } + + if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil { + return false, err + } + if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil { + return false, err + } + out.PermittedDNSDomainsCritical = e.Critical + + return unhandled, nil +} + +func processExtensions(out *stdx509.Certificate) error { + var err error + for _, e := range out.Extensions { + unhandled := false + + if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 { + switch e.Id[3] { + case 15: + out.KeyUsage, err = parseKeyUsageExtension(e.Value) + if err != nil { + return err + } + case 19: + out.IsCA, out.MaxPathLen, err = parseBasicConstraintsExtension(e.Value) + if err != nil { + return err + } + out.BasicConstraintsValid = true + out.MaxPathLenZero = out.MaxPathLen == 0 + case 17: + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value) + if err != nil { + return err + } + + if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 { + // If we didn't parse anything then we do the critical check, below. + unhandled = true + } + + case 30: + unhandled, err = parseNameConstraintsExtension(out, e) + if err != nil { + return err + } + + case 31: + // RFC 5280, 4.2.1.13 + + // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint + // + // DistributionPoint ::= SEQUENCE { + // distributionPoint [0] DistributionPointName OPTIONAL, + // reasons [1] ReasonFlags OPTIONAL, + // cRLIssuer [2] GeneralNames OPTIONAL } + // + // DistributionPointName ::= CHOICE { + // fullName [0] GeneralNames, + // nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + val := cryptobyte.String(e.Value) + if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid CRL distribution points") + } + for !val.Empty() { + var dpDER cryptobyte.String + if !val.ReadASN1(&dpDER, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid CRL distribution point") + } + var dpNameDER cryptobyte.String + var dpNamePresent bool + if !dpDER.ReadOptionalASN1(&dpNameDER, &dpNamePresent, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + if !dpNamePresent { + continue + } + if !dpNameDER.ReadASN1(&dpNameDER, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + for !dpNameDER.Empty() { + if !dpNameDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) { + break + } + var uri cryptobyte.String + if !dpNameDER.ReadASN1(&uri, cryptobyte_asn1.Tag(6).ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(uri)) + } + } + + case 35: + out.AuthorityKeyId, err = parseAuthorityKeyIdentifier(e) + if err != nil { + return err + } + case 37: + out.ExtKeyUsage, out.UnknownExtKeyUsage, err = parseExtKeyUsageExtension(e.Value) + if err != nil { + return err + } + case 14: + // RFC 5280, 4.2.1.2 
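+ // (2.5.29.14 is the id-ce-subjectKeyIdentifier extension.)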
+ if e.Critical { + // Conforming CAs MUST mark this extension as non-critical + return errors.New("x509: subject key identifier incorrectly marked critical") + } + val := cryptobyte.String(e.Value) + var skid cryptobyte.String + if !val.ReadASN1(&skid, cryptobyte_asn1.OCTET_STRING) { + return errors.New("x509: invalid subject key identifier") + } + out.SubjectKeyId = skid + // case 32: + // out.Policies, err = parseCertificatePoliciesExtension(e.Value) + // if err != nil { + // return err + // } + // out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, 0, len(out.Policies)) + // for _, oid := range out.Policies { + // if oid, ok := oid.toASN1OID(); ok { + // out.PolicyIdentifiers = append(out.PolicyIdentifiers, oid) + // } + // } + default: + // Unknown extensions are recorded if critical. + unhandled = true + } + } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + if e.Critical { + // Conforming CAs MUST mark this extension as non-critical + return errors.New("x509: authority info access incorrectly marked critical") + } + val := cryptobyte.String(e.Value) + if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid authority info access") + } + for !val.Empty() { + var aiaDER cryptobyte.String + if !val.ReadASN1(&aiaDER, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid authority info access") + } + var method asn1.ObjectIdentifier + if !aiaDER.ReadASN1ObjectIdentifier(&method) { + return errors.New("x509: invalid authority info access") + } + if !aiaDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) { + continue + } + if !aiaDER.ReadASN1(&aiaDER, cryptobyte_asn1.Tag(6).ContextSpecific()) { + return errors.New("x509: invalid authority info access") + } + switch { + case method.Equal(oidAuthorityInfoAccessOcsp): + out.OCSPServer = append(out.OCSPServer, string(aiaDER)) + case method.Equal(oidAuthorityInfoAccessIssuers): + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(aiaDER)) + } + } + } else { + // Unknown extensions are recorded if critical. 
+ unhandled = true + } + + if e.Critical && unhandled { + out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id) + } + } + + return nil +} + +var x509negativeserial = legacyGodebugSetting(0) // replaces godebug.New("x509negativeserial") + +func parseCertificate(der []byte) (*stdx509.Certificate, error) { + cert := &stdx509.Certificate{} + + input := cryptobyte.String(der) + // we read the SEQUENCE including length and tag bytes so that + // we can populate Certificate.Raw, before unwrapping the + // SEQUENCE so it can be operated on + if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed certificate") + } + cert.Raw = input + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed certificate") + } + + var tbs cryptobyte.String + // do the same trick again as above to extract the raw + // bytes for Certificate.RawTBSCertificate + if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed tbs certificate") + } + cert.RawTBSCertificate = tbs + if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed tbs certificate") + } + + if !tbs.ReadOptionalASN1Integer(&cert.Version, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific(), 0) { + return nil, errors.New("x509: malformed version") + } + if cert.Version < 0 { + return nil, errors.New("x509: malformed version") + } + // for backwards compat reasons Version is one-indexed, + // rather than zero-indexed as defined in 5280 + cert.Version++ + if cert.Version > 3 { + return nil, errors.New("x509: invalid version") + } + + serial := new(big.Int) + if !tbs.ReadASN1Integer(serial) { + return nil, errors.New("x509: malformed serial number") + } + if serial.Sign() == -1 { + if x509negativeserial.Value() != "1" { + return nil, errors.New("x509: negative serial number") + } else { + x509negativeserial.IncNonDefault() + } + } + cert.SerialNumber = serial + + var sigAISeq cryptobyte.String + if !tbs.ReadASN1(&sigAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed signature algorithm identifier") + } + // Before parsing the inner algorithm identifier, extract + // the outer algorithm identifier and make sure that they + // match. 
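+ // RFC 5280, Section 4.1.1.2: the outer signatureAlgorithm field "MUST
+ // contain the same algorithm identifier as the signature field in the
+ // sequence tbsCertificate".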
+ var outerSigAISeq cryptobyte.String + if !input.ReadASN1(&outerSigAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed algorithm identifier") + } + if !bytes.Equal(outerSigAISeq, sigAISeq) { + return nil, errors.New("x509: inner and outer signature algorithm identifiers don't match") + } + sigAI, err := parseAI(sigAISeq) + if err != nil { + return nil, err + } + cert.SignatureAlgorithm = getSignatureAlgorithmFromAI(sigAI) + + var issuerSeq cryptobyte.String + if !tbs.ReadASN1Element(&issuerSeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed issuer") + } + cert.RawIssuer = issuerSeq + issuerRDNs, err := parseName(issuerSeq) + if err != nil { + return nil, err + } + cert.Issuer.FillFromRDNSequence(issuerRDNs) + + var validity cryptobyte.String + if !tbs.ReadASN1(&validity, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed validity") + } + cert.NotBefore, cert.NotAfter, err = parseValidity(validity) + if err != nil { + return nil, err + } + + var subjectSeq cryptobyte.String + if !tbs.ReadASN1Element(&subjectSeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed issuer") + } + cert.RawSubject = subjectSeq + subjectRDNs, err := parseName(subjectSeq) + if err != nil { + return nil, err + } + cert.Subject.FillFromRDNSequence(subjectRDNs) + + var spki cryptobyte.String + if !tbs.ReadASN1Element(&spki, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed spki") + } + cert.RawSubjectPublicKeyInfo = spki + if !spki.ReadASN1(&spki, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed spki") + } + var pkAISeq cryptobyte.String + if !spki.ReadASN1(&pkAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed public key algorithm identifier") + } + pkAI, err := parseAI(pkAISeq) + if err != nil { + return nil, err + } + cert.PublicKeyAlgorithm = getPublicKeyAlgorithmFromOID(pkAI.Algorithm) + var spk asn1.BitString + if !spki.ReadASN1BitString(&spk) { + return nil, errors.New("x509: malformed subjectPublicKey") + } + if cert.PublicKeyAlgorithm != stdx509.UnknownPublicKeyAlgorithm { + cert.PublicKey, err = parsePublicKey(&publicKeyInfo{ + Algorithm: pkAI, + PublicKey: spk, + }) + if err != nil { + return nil, err + } + } + + if cert.Version > 1 { + if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(1).ContextSpecific()) { + return nil, errors.New("x509: malformed issuerUniqueID") + } + if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(2).ContextSpecific()) { + return nil, errors.New("x509: malformed subjectUniqueID") + } + if cert.Version == 3 { + var extensions cryptobyte.String + var present bool + if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.Tag(3).Constructed().ContextSpecific()) { + return nil, errors.New("x509: malformed extensions") + } + if present { + seenExts := make(map[string]bool) + if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed extensions") + } + for !extensions.Empty() { + var extension cryptobyte.String + if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed extension") + } + ext, err := parseExtension(extension) + if err != nil { + return nil, err + } + oidStr := ext.Id.String() + if seenExts[oidStr] { + return nil, fmt.Errorf("x509: certificate contains duplicate extension with OID %q", oidStr) + } + seenExts[oidStr] = true + cert.Extensions = append(cert.Extensions, ext) + } + err = processExtensions(cert) + if err != nil { + 
return nil, err + } + } + } + } + + var signature asn1.BitString + if !input.ReadASN1BitString(&signature) { + return nil, errors.New("x509: malformed signature") + } + cert.Signature = signature.RightAlign() + + return cert, nil +} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go new file mode 100644 index 00000000..da3c38a4 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package legacyx509 + +import ( + "math/big" +) + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS #1 public key. +type pkcs1PublicKey struct { + N *big.Int + E int +} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go new file mode 100644 index 00000000..901e3ba8 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go @@ -0,0 +1,193 @@ +package legacyx509 + +import ( + "bytes" + "strings" +) + +// rfc2821Mailbox represents a “mailbox” (which is an email address to most +// people) by breaking it into the “local” (i.e. before the '@') and “domain” +// parts. +type rfc2821Mailbox struct { + local, domain string +} + +// parseRFC2821Mailbox parses an email address into local and domain parts, +// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280, +// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The +// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”. +func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { + if len(in) == 0 { + return mailbox, false + } + + localPartBytes := make([]byte, 0, len(in)/2) + + if in[0] == '"' { + // Quoted-string = DQUOTE *qcontent DQUOTE + // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127 + // qcontent = qtext / quoted-pair + // qtext = non-whitespace-control / + // %d33 / %d35-91 / %d93-126 + // quoted-pair = ("\" text) / obs-qp + // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text + // + // (Names beginning with “obs-” are the obsolete syntax from RFC 2822, + // Section 4. Since it has been 16 years, we no longer accept that.) + in = in[1:] + QuotedString: + for { + if len(in) == 0 { + return mailbox, false + } + c := in[0] + in = in[1:] + + switch { + case c == '"': + break QuotedString + + case c == '\\': + // quoted-pair + if len(in) == 0 { + return mailbox, false + } + if in[0] == 11 || + in[0] == 12 || + (1 <= in[0] && in[0] <= 9) || + (14 <= in[0] && in[0] <= 127) { + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + } else { + return mailbox, false + } + + case c == 11 || + c == 12 || + // Space (char 32) is not allowed based on the + // BNF, but RFC 3696 gives an example that + // assumes that it is. Several “verified” + // errata continue to argue about this point. + // We choose to accept it. + c == 32 || + c == 33 || + c == 127 || + (1 <= c && c <= 8) || + (14 <= c && c <= 31) || + (35 <= c && c <= 91) || + (93 <= c && c <= 126): + // qtext + localPartBytes = append(localPartBytes, c) + + default: + return mailbox, false + } + } + } else { + // Atom ("." 
Atom)* + NextChar: + for len(in) > 0 { + // atext from RFC 2822, Section 3.2.4 + c := in[0] + + switch { + case c == '\\': + // Examples given in RFC 3696 suggest that + // escaped characters can appear outside of a + // quoted string. Several “verified” errata + // continue to argue the point. We choose to + // accept it. + in = in[1:] + if len(in) == 0 { + return mailbox, false + } + fallthrough + + case ('0' <= c && c <= '9') || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + c == '!' || c == '#' || c == '$' || c == '%' || + c == '&' || c == '\'' || c == '*' || c == '+' || + c == '-' || c == '/' || c == '=' || c == '?' || + c == '^' || c == '_' || c == '`' || c == '{' || + c == '|' || c == '}' || c == '~' || c == '.': + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + + default: + break NextChar + } + } + + if len(localPartBytes) == 0 { + return mailbox, false + } + + // From RFC 3696, Section 3: + // “period (".") may also appear, but may not be used to start + // or end the local part, nor may two or more consecutive + // periods appear.” + twoDots := []byte{'.', '.'} + if localPartBytes[0] == '.' || + localPartBytes[len(localPartBytes)-1] == '.' || + bytes.Contains(localPartBytes, twoDots) { + return mailbox, false + } + } + + if len(in) == 0 || in[0] != '@' { + return mailbox, false + } + in = in[1:] + + // The RFC specifies a format for domains, but that's known to be + // violated in practice so we accept that anything after an '@' is the + // domain part. + if _, ok := domainToReverseLabels(in); !ok { + return mailbox, false + } + + mailbox.local = string(localPartBytes) + mailbox.domain = in + return mailbox, true +} + +// domainToReverseLabels converts a textual domain name like foo.example.com to +// the list of labels in reverse order, e.g. ["com", "example", "foo"]. +func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + for len(domain) > 0 { + if i := strings.LastIndexByte(domain, '.'); i == -1 { + reverseLabels = append(reverseLabels, domain) + domain = "" + } else { + reverseLabels = append(reverseLabels, domain[i+1:]) + domain = domain[:i] + if i == 0 { // domain == "" + // domain is prefixed with an empty label, append an empty + // string to reverseLabels to indicate this. + reverseLabels = append(reverseLabels, "") + } + } + } + + if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 { + // An empty label at the end indicates an absolute (rooted) name. + return nil, false + } + + for _, label := range reverseLabels { + if len(label) == 0 { + // Empty labels are otherwise invalid. + return nil, false + } + + for _, c := range label { + if c < 33 || c > 126 { + // Invalid character. + return nil, false + } + } + } + + return reverseLabels, true +} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go new file mode 100644 index 00000000..a4500bfb --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go @@ -0,0 +1,488 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package x509 implements a subset of the X.509 standard. +// +// It allows parsing and generating certificates, certificate signing +// requests, certificate revocation lists, and encoded public and private keys. +// It provides a certificate verifier, complete with a chain builder.
+// +// The package targets the X.509 technical profile defined by the IETF (RFC +// 2459/3280/5280), and as further restricted by the CA/Browser Forum Baseline +// Requirements. There is minimal support for features outside of these +// profiles, as the primary goal of the package is to provide compatibility +// with the publicly trusted TLS certificate ecosystem and its policies and +// constraints. +// +// On macOS and Windows, certificate verification is handled by system APIs, but +// the package aims to apply consistent validation rules across operating +// systems. +package legacyx509 + +import ( + "bytes" + "crypto" + "crypto/elliptic" + stdx509 "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "strconv" + "unicode" + + // Explicitly import these for their crypto.RegisterHash init side-effects. + // Keep these as blank imports, even if they're imported above. + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +type publicKeyInfo struct { + Raw asn1.RawContent + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} + +type SignatureAlgorithm int + +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = iota + + MD2WithRSA // Unsupported. + MD5WithRSA // Only supported for signing, not verification. + SHA1WithRSA // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. + SHA256WithRSA + SHA384WithRSA + SHA512WithRSA + DSAWithSHA1 // Unsupported. + DSAWithSHA256 // Unsupported. + ECDSAWithSHA1 // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. + ECDSAWithSHA256 + ECDSAWithSHA384 + ECDSAWithSHA512 + SHA256WithRSAPSS + SHA384WithRSAPSS + SHA512WithRSAPSS + PureEd25519 +) + +func (algo SignatureAlgorithm) String() string { + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + return details.name + } + } + return strconv.Itoa(int(algo)) +} + +type PublicKeyAlgorithm int + +const ( + UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota + RSA + DSA // Only supported for parsing. 
+ ECDSA + Ed25519 +) + +var publicKeyAlgoName = [...]string{ + RSA: "RSA", + DSA: "DSA", + ECDSA: "ECDSA", + Ed25519: "Ed25519", +} + +func (algo PublicKeyAlgorithm) String() string { + if 0 < algo && int(algo) < len(publicKeyAlgoName) { + return publicKeyAlgoName[algo] + } + return strconv.Itoa(int(algo)) +} + +// OIDs for signature algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } +// +// RFC 3279 2.2.1 RSA Signature Algorithms +// +// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } +// +// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } +// +// dsaWithSha1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } +// +// RFC 3279 2.2.3 ECDSA Signature Algorithm +// +// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-x962(10045) +// signatures(4) ecdsa-with-SHA1(1)} +// +// RFC 4055 5 PKCS #1 Version 1.5 +// +// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } +// +// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } +// +// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } +// +// RFC 5758 3.1 DSA Signature Algorithms +// +// dsaWithSha256 OBJECT IDENTIFIER ::= { +// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) +// csor(3) algorithms(4) id-dsa-with-sha2(3) 2} +// +// RFC 5758 3.2 ECDSA Signature Algorithm +// +// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } +// +// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } +// +// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } +// +// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers +// +// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } +var ( + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + + oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} + + // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA + // but it's specified by ISO. Microsoft's makecert.exe has been known + // to produce certificates with this OID. 
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} +) + +var signatureAlgorithmDetails = []struct { + algo SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + params asn1.RawValue + pubKeyAlgo PublicKeyAlgorithm + hash crypto.Hash + isRSAPSS bool +}{ + {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, asn1.NullRawValue, RSA, crypto.MD5, false}, + {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, + {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, + {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, asn1.NullRawValue, RSA, crypto.SHA256, false}, + {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, asn1.NullRawValue, RSA, crypto.SHA384, false}, + {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, asn1.NullRawValue, RSA, crypto.SHA512, false}, + {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, pssParametersSHA256, RSA, crypto.SHA256, true}, + {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, pssParametersSHA384, RSA, crypto.SHA384, true}, + {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, pssParametersSHA512, RSA, crypto.SHA512, true}, + {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, emptyRawValue, DSA, crypto.SHA1, false}, + {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, emptyRawValue, DSA, crypto.SHA256, false}, + {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, emptyRawValue, ECDSA, crypto.SHA1, false}, + {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, emptyRawValue, ECDSA, crypto.SHA256, false}, + {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, emptyRawValue, ECDSA, crypto.SHA384, false}, + {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, emptyRawValue, ECDSA, crypto.SHA512, false}, + {PureEd25519, "Ed25519", oidSignatureEd25519, emptyRawValue, Ed25519, crypto.Hash(0) /* no pre-hashing */, false}, +} + +var emptyRawValue = asn1.RawValue{} + +// DER encoded RSA PSS parameters for the +// SHA256, SHA384, and SHA512 hashes as defined in RFC 3447, Appendix A.2.3. +// The parameters contain the following values: +// - hashAlgorithm contains the associated hash identifier with NULL parameters +// - maskGenAlgorithm always contains the default mgf1SHA1 identifier +// - saltLength contains the length of the associated hash +// - trailerField always contains the default trailerFieldBC value +var ( + pssParametersSHA256 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 162, 3, 2, 1, 32}} + pssParametersSHA384 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 162, 3, 2, 1, 48}} + pssParametersSHA512 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 162, 3, 2, 1, 64}} +) + +// pssParameters reflects the parameters in an AlgorithmIdentifier that +// specifies RSA PSS. See RFC 3447, Appendix A.2.3. +type pssParameters struct { + // The following three fields are not marked as + // optional because the default values specify SHA-1, + // which is no longer suitable for use in signatures. 
+ Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"` + MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"` + SaltLength int `asn1:"explicit,tag:2"` + TrailerField int `asn1:"optional,explicit,tag:3,default:1"` +} + +func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) stdx509.SignatureAlgorithm { + if ai.Algorithm.Equal(oidSignatureEd25519) { + // RFC 8410, Section 3 + // > For all of the OIDs, the parameters MUST be absent. + if len(ai.Parameters.FullBytes) != 0 { + return stdx509.UnknownSignatureAlgorithm + } + } + + if !ai.Algorithm.Equal(oidSignatureRSAPSS) { + for _, details := range signatureAlgorithmDetails { + if ai.Algorithm.Equal(details.oid) { + return stdx509.SignatureAlgorithm(details.algo) + } + } + return stdx509.UnknownSignatureAlgorithm + } + + // RSA PSS is special because it encodes important parameters + // in the Parameters. + + var params pssParameters + if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, ¶ms); err != nil { + return stdx509.UnknownSignatureAlgorithm + } + + var mgf1HashFunc pkix.AlgorithmIdentifier + if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil { + return stdx509.UnknownSignatureAlgorithm + } + + // PSS is greatly overburdened with options. This code forces them into + // three buckets by requiring that the MGF1 hash function always match the + // message hash function (as recommended in RFC 3447, Section 8.1), that the + // salt length matches the hash length, and that the trailer field has the + // default value. + if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) || + !params.MGF.Algorithm.Equal(oidMGF1) || + !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || + (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) || + params.TrailerField != 1 { + return stdx509.UnknownSignatureAlgorithm + } + + switch { + case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: + return stdx509.SHA256WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: + return stdx509.SHA384WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: + return stdx509.SHA512WithRSAPSS + } + + return stdx509.UnknownSignatureAlgorithm +} + +var ( + // RFC 3279, 2.3 Public Key Algorithms + // + // pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // rsadsi(113549) pkcs(1) 1 } + // + // rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } + // + // id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // x9-57(10040) x9cm(4) 1 } + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + // RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters + // + // id-ecPublicKey OBJECT IDENTIFIER ::= { + // iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + // RFC 8410, Section 3 + // + // id-X25519 OBJECT IDENTIFIER ::= { 1 3 101 110 } + // id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } + oidPublicKeyX25519 = asn1.ObjectIdentifier{1, 3, 101, 110} + oidPublicKeyEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} +) + +// getPublicKeyAlgorithmFromOID returns the exposed PublicKeyAlgorithm +// identifier for public key types supported in certificates and CSRs. Marshal +// and Parse functions may support a different set of public key types. 
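+// (For instance, the id-ecPublicKey OID 1.2.840.10045.2.1, declared above as
+// oidPublicKeyECDSA, maps to stdx509.ECDSA; any OID not listed falls through
+// to stdx509.UnknownPublicKeyAlgorithm.)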
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) stdx509.PublicKeyAlgorithm { + switch { + case oid.Equal(oidPublicKeyRSA): + return stdx509.RSA + case oid.Equal(oidPublicKeyDSA): + return stdx509.DSA + case oid.Equal(oidPublicKeyECDSA): + return stdx509.ECDSA + case oid.Equal(oidPublicKeyEd25519): + return stdx509.Ed25519 + } + return stdx509.UnknownPublicKeyAlgorithm +} + +// RFC 5480, 2.1.1.1. Named Curve +// +// secp224r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 33 } +// +// secp256r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 7 } +// +// secp384r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 34 } +// +// secp521r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 35 } +// +// NB: secp256r1 is equivalent to prime256v1 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve { + switch { + case oid.Equal(oidNamedCurveP224): + return elliptic.P224() + case oid.Equal(oidNamedCurveP256): + return elliptic.P256() + case oid.Equal(oidNamedCurveP384): + return elliptic.P384() + case oid.Equal(oidNamedCurveP521): + return elliptic.P521() + } + return nil +} + +// KeyUsage represents the set of actions that are valid for a given key. It's +// a bitmap of the KeyUsage* constants. +type KeyUsage int + +const ( + KeyUsageDigitalSignature KeyUsage = 1 << iota + KeyUsageContentCommitment + KeyUsageKeyEncipherment + KeyUsageDataEncipherment + KeyUsageKeyAgreement + KeyUsageCertSign + KeyUsageCRLSign + KeyUsageEncipherOnly + KeyUsageDecipherOnly +) + +// RFC 5280, 4.2.1.12 Extended Key Usage +// +// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } +// +// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } +// +// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } +// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } +// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } +// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } +// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } +// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } +var ( + oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} + oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} + oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} + oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} + oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} + oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} + oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} + oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} + oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} + oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} + oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} + oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} + oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22} + 
oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1} +) + +// ExtKeyUsage represents an extended set of actions that are valid for a given key. +// Each of the ExtKeyUsage* constants define a unique action. +type ExtKeyUsage int + +const ( + ExtKeyUsageAny ExtKeyUsage = iota + ExtKeyUsageServerAuth + ExtKeyUsageClientAuth + ExtKeyUsageCodeSigning + ExtKeyUsageEmailProtection + ExtKeyUsageIPSECEndSystem + ExtKeyUsageIPSECTunnel + ExtKeyUsageIPSECUser + ExtKeyUsageTimeStamping + ExtKeyUsageOCSPSigning + ExtKeyUsageMicrosoftServerGatedCrypto + ExtKeyUsageNetscapeServerGatedCrypto + ExtKeyUsageMicrosoftCommercialCodeSigning + ExtKeyUsageMicrosoftKernelCodeSigning +) + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. +var extKeyUsageOIDs = []struct { + extKeyUsage ExtKeyUsage + oid asn1.ObjectIdentifier +}{ + {ExtKeyUsageAny, oidExtKeyUsageAny}, + {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, + {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, + {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, + {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, + {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem}, + {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel}, + {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser}, + {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, + {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning}, + {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, + {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, + {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning}, + {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning}, +} + +func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) { + for _, pair := range extKeyUsageOIDs { + if oid.Equal(pair.oid) { + return pair.extKeyUsage, true + } + } + return +} + +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +var ( + oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1} +) + +var ( + oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1} + oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2} +) + +func isIA5String(s string) error { + for _, r := range s { + // Per RFC5280 "IA5String is limited to the set of ASCII characters" + if r > unicode.MaxASCII { + return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) + } + } + + return nil +} diff --git a/vendor/go.mozilla.org/pkcs7/pkcs7.go b/vendor/github.com/smallstep/pkcs7/pkcs7.go similarity index 70% rename from vendor/go.mozilla.org/pkcs7/pkcs7.go rename to vendor/github.com/smallstep/pkcs7/pkcs7.go index ccc6cc6d..f6c6dfbb 100644 --- a/vendor/go.mozilla.org/pkcs7/pkcs7.go +++ b/vendor/github.com/smallstep/pkcs7/pkcs7.go @@ -13,8 +13,11 @@ import ( "errors" "fmt" "sort" + "sync" _ "crypto/sha1" // for crypto.SHA1 + + legacyx509 "github.com/smallstep/pkcs7/internal/legacy/x509" ) // PKCS7 Represents a PKCS7 structure @@ -31,7 +34,7 @@ type contentInfo struct { Content asn1.RawValue `asn1:"explicit,optional,tag:0"` } -// ErrUnsupportedContentType is returned when a PKCS7 content is not supported. +// ErrUnsupportedContentType is returned when a PKCS7 content type is not supported. 
// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2), // and Enveloped Data are supported (1.2.840.113549.1.7.3) var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type") @@ -53,6 +56,7 @@ var ( OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + OIDDigestAlgorithmSHA224 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 4} OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} @@ -63,23 +67,28 @@ var ( OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} // Signature Algorithms - OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} - OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + OIDEncryptionAlgorithmRSAMD5 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.4 + OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} // ditto + OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} // ditto + OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} // ditto + OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} // ditto + OIDEncryptionAlgorithmRSASHA224 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 14} // ditto OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} - // Encryption Algorithms - OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} - OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} - OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} - OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} + // Asymmetric Encryption Algorithms + OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.2 + OIDEncryptionAlgorithmRSAESOAEP = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.1 + + // Symmetric Encryption Algorithms + OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.1 + OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.2 + OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} // see https://www.rfc-editor.org/rfc/rfc3565.html#section-4.1 + OIDEncryptionAlgorithmAES128GCM = 
asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} // see https://www.rfc-editor.org/rfc/rfc5084.html#section-3.2 + OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.5 + OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} // see https://www.rfc-editor.org/rfc/rfc5084.html#section-3.2 ) func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { @@ -114,11 +123,11 @@ func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1. return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm") } -// getOIDForEncryptionAlgorithm takes the private key type of the signer and +// getOIDForEncryptionAlgorithm takes the public or private key type of the signer and // the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm -func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { - switch pkey.(type) { - case *rsa.PrivateKey: +func getOIDForEncryptionAlgorithm(pkey interface{}, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { + switch k := pkey.(type) { + case *rsa.PrivateKey, *rsa.PublicKey: switch { default: return OIDEncryptionAlgorithmRSA, nil @@ -133,7 +142,7 @@ func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.Obje case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): return OIDEncryptionAlgorithmRSASHA512, nil } - case *ecdsa.PrivateKey: + case *ecdsa.PrivateKey, *ecdsa.PublicKey: switch { case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): return OIDDigestAlgorithmECDSASHA1, nil @@ -144,8 +153,13 @@ func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.Obje case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): return OIDDigestAlgorithmECDSASHA512, nil } - case *dsa.PrivateKey: + case *dsa.PrivateKey, *dsa.PublicKey: return OIDDigestAlgorithmDSA, nil + case crypto.Signer: + // This generic case is here to cover types from other packages. It + // was specifically added to handle the private keyRSA type in the + // github.com/go-piv/piv-go/piv package. + return getOIDForEncryptionAlgorithm(k.Public(), OIDDigestAlg) } return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey) @@ -202,6 +216,40 @@ func parseEncryptedData(data []byte) (*PKCS7, error) { }, nil } +// SetFallbackLegacyX509CertificateParserEnabled enables parsing certificates +// embedded in a PKCS7 message using the logic from crypto/x509 from before +// Go 1.23. Go 1.23 introduced a breaking change in case a certificate contains +// a critical authority key identifier, which is the correct thing to do based +// on RFC 5280, but it breaks Windows devices performing the Simple Certificate +// Enrolment Protocol (SCEP), as the certificates embedded in those requests +// apparently have authority key identifier extensions marked critical. +// +// See https://go-review.googlesource.com/c/go/+/562341 for the change in the +// Go source. +// +// When [SetFallbackLegacyX509CertificateParserEnabled] is called with true, it +// enables parsing using the legacy crypto/x509 certificate parser. 
It'll first
+// try to parse the certificates using the regular Go crypto/x509 package, but
+// if that fails with the error described above, it'll retry parsing the
+// certificates with a copy of the crypto/x509 parser based on Go 1.23 that
+// skips the check for the authority key identifier extension being marked
+// critical.
+func SetFallbackLegacyX509CertificateParserEnabled(v bool) {
+	legacyX509CertificateParser.Lock()
+	legacyX509CertificateParser.enabled = v
+	legacyX509CertificateParser.Unlock()
+}
+
+var legacyX509CertificateParser struct {
+	sync.RWMutex
+	enabled bool
+}
+
+func isLegacyX509ParserEnabled() bool {
+	legacyX509CertificateParser.RLock()
+	defer legacyX509CertificateParser.RUnlock()
+	return legacyX509CertificateParser.enabled
+}
+
 func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
 	if len(raw.Raw) == 0 {
 		return nil, nil
@@ -212,7 +260,14 @@ func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
 		return nil, err
 	}
 
-	return x509.ParseCertificates(val.Bytes)
+	certificates, err := x509.ParseCertificates(val.Bytes)
+	if err != nil && err.Error() == "x509: authority key identifier incorrectly marked critical" {
+		if isLegacyX509ParserEnabled() {
+			certificates, err = legacyx509.ParseCertificates(val.Bytes)
+		}
+	}
+
+	return certificates, err
 }
 
 func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
diff --git a/vendor/go.mozilla.org/pkcs7/sign.go b/vendor/github.com/smallstep/pkcs7/sign.go
similarity index 100%
rename from vendor/go.mozilla.org/pkcs7/sign.go
rename to vendor/github.com/smallstep/pkcs7/sign.go
diff --git a/vendor/go.mozilla.org/pkcs7/verify.go b/vendor/github.com/smallstep/pkcs7/verify.go
similarity index 87%
rename from vendor/go.mozilla.org/pkcs7/verify.go
rename to vendor/github.com/smallstep/pkcs7/verify.go
index f09e2724..7525f918 100644
--- a/vendor/go.mozilla.org/pkcs7/verify.go
+++ b/vendor/github.com/smallstep/pkcs7/verify.go
@@ -54,6 +54,21 @@ func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime ti
 	return nil
 }
 
+// SigningTimeNotValidError is returned when the signing time attribute
+// falls outside of the signer certificate validity.
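+//
+// Callers that need to treat this case specially can match it with
+// errors.As; a minimal sketch (variable names are illustrative only):
+//
+//	var stErr *SigningTimeNotValidError
+//	if err := p7.Verify(); errors.As(err, &stErr) {
+//		fmt.Println("signed at", stErr.SigningTime, "but certificate valid from", stErr.NotBefore)
+//	}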
+type SigningTimeNotValidError struct { + SigningTime time.Time + NotBefore time.Time // NotBefore of signer + NotAfter time.Time // NotAfter of signer +} + +func (e *SigningTimeNotValidError) Error() string { + return fmt.Sprintf("pkcs7: signing time %q is outside of certificate validity %q to %q", + e.SigningTime.Format(time.RFC3339), + e.NotBefore.Format(time.RFC3339), + e.NotAfter.Format(time.RFC3339)) +} + func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool, currentTime time.Time) (err error) { signedData := p7.Content ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) @@ -91,10 +106,11 @@ func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPo if err == nil { // signing time found, performing validity check if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { - return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", - signingTime.Format(time.RFC3339), - ee.NotBefore.Format(time.RFC3339), - ee.NotAfter.Format(time.RFC3339)) + return &SigningTimeNotValidError{ + SigningTime: signingTime, + NotBefore: ee.NotBefore, + NotAfter: ee.NotAfter, + } } } } @@ -146,10 +162,11 @@ func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (e if err == nil { // signing time found, performing validity check if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { - return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", - signingTime.Format(time.RFC3339), - ee.NotBefore.Format(time.RFC3339), - ee.NotAfter.Format(time.RFC3339)) + return &SigningTimeNotValidError{ + SigningTime: signingTime, + NotBefore: ee.NotBefore, + NotAfter: ee.NotAfter, + } } } } @@ -210,8 +227,13 @@ func parseSignedData(data []byte) (*PKCS7, error) { // Compound octet string if compound.IsCompound { if compound.Tag == 4 { - if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil { - return nil, err + for len(compound.Bytes) > 0 { + var cdata asn1.RawValue + if _, err = asn1.Unmarshal(compound.Bytes, &cdata); err != nil { + return nil, err + } + content = append(content, cdata.Bytes...) 
+ compound.Bytes = compound.Bytes[len(cdata.FullBytes):] } } else { content = compound.Bytes @@ -278,13 +300,13 @@ func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384), digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): switch { - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1): return x509.SHA1WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256): return x509.SHA256WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384): return x509.SHA384WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): return x509.SHA512WithRSA, nil default: return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 91dd430c..6fc80512 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -23,21 +23,19 @@ var errAlignmentOverflow = errors.New("integer overflow when calculating alignme // nextAligned finds the next offset that satisfies alignment. func nextAligned(offset int64, alignment int) (int64, error) { - align64 := uint64(alignment) - offset64 := uint64(offset) + align64 := int64(alignment) - if align64 <= 0 || offset64%align64 == 0 { + if align64 <= 0 || offset%align64 == 0 { return offset, nil } - offset64 += (align64 - offset64%align64) + align64 -= offset % align64 - if offset64 > math.MaxInt64 { + if (math.MaxInt64 - offset) < align64 { return 0, errAlignmentOverflow } - //nolint:gosec // Overflow handled above. - return int64(offset64), nil + return offset + align64, nil } // writeDataObjectAt writes the data object described by di to ws, using time t, recording details diff --git a/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go b/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go index 7b9975e3..9bf8f4fe 100644 --- a/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go +++ b/vendor/github.com/tchap/go-patricia/v2/patricia/patricia.go @@ -465,7 +465,7 @@ func (trie *Trie) compact() *Trie { // If any item is set, we cannot compact since we want to retain // the ability to do searching by key. This makes compaction less usable, // but that simply cannot be avoided. - if trie.item != nil || child.item != nil { + if child == nil || trie.item != nil || child.item != nil { return trie } diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/vendor/github.com/vbatts/tar-split/archive/tar/format.go index 1f89d0c5..60977980 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/format.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/format.go @@ -143,6 +143,10 @@ const ( blockSize = 512 // Size of each block in a tar stream nameSize = 100 // Max length of the name field in USTAR format prefixSize = 155 // Max length of the prefix field in USTAR format + + // Max length of a special file (PAX header, GNU long name or link). 
+ // This matches the limit used by libarchive. + maxSpecialFileSize = 1 << 20 ) // blockPadding computes the number of bytes needed to pad offset up to the diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 6a6b3e01..248a7ccb 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -144,7 +144,7 @@ func (tr *Reader) next() (*Header, error) { continue // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: format.mayOnlyBe(FormatGNU) - realname, err := io.ReadAll(tr) + realname, err := readSpecialFile(tr) if err != nil { return nil, err } @@ -338,7 +338,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { // parsePAX parses PAX headers. // If an extended header (type 'x') is invalid, ErrHeader is returned func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := io.ReadAll(r) + buf, err := readSpecialFile(r) if err != nil { return nil, err } @@ -889,6 +889,16 @@ func tryReadFull(r io.Reader, b []byte) (n int, err error) { return n, err } +// readSpecialFile is like io.ReadAll except it returns +// ErrFieldTooLong if more than maxSpecialFileSize is read. +func readSpecialFile(r io.Reader) ([]byte, error) { + buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1)) + if len(buf) > maxSpecialFileSize { + return nil, ErrFieldTooLong + } + return buf, err +} + // discard skips n bytes in r, reporting an error if unable to do so. func discard(tr *Reader, n int64) error { var seekSkipped, copySkipped int64 diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go index e80498d0..893eac00 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go @@ -199,6 +199,9 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { flag = TypeXHeader } data := buf.String() + if len(data) > maxSpecialFileSize { + return ErrFieldTooLong + } if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { return err // Global headers return here } diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md index af97c92a..05088f24 100644 --- a/vendor/github.com/vbauerster/mpb/v8/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -104,14 +104,14 @@ func main() { p.Wait() ``` -#### [Dynamic total](_examples/dynTotal/main.go) +#### [dynTotal example](_examples/dynTotal/main.go) -![dynamic total](_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg) +![dynTotal](_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg) -#### [Complex example](_examples/complex/main.go) +#### [complex example](_examples/complex/main.go) ![complex](_svg/wHzf1M7sd7B3zVa2scBMnjqRf.svg) -#### [Bytes counters](_examples/io/main.go) +#### [io example](_examples/io/main.go) -![byte counters](_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg) +![io](_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index 73753f02..db4f99c2 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -45,7 +45,7 @@ type bState struct { noPop bool autoRefresh bool buffers [3]*bytes.Buffer - decorators [2][]decor.Decorator + decorGroups [2][]decor.Decorator ewmaDecorators []decor.EwmaDecorator filler 
BarFiller extender extenderFunc @@ -117,7 +117,7 @@ func (b *Bar) ProxyWriter(w io.Writer) io.WriteCloser { } } -// ID returs id of the bar. +// ID returns id of the bar. func (b *Bar) ID() int { result := make(chan int) select { @@ -145,42 +145,28 @@ func (b *Bar) Current() int64 { // operation for example. func (b *Bar) SetRefill(amount int64) { select { - case b.operateState <- func(s *bState) { - if amount < s.current { - s.refill = amount - } else { - s.refill = s.current - } - }: + case b.operateState <- func(s *bState) { s.refill = min(amount, s.current) }: case <-b.ctx.Done(): } } -// TraverseDecorators traverses available decorators and calls cb func -// on each in a new goroutine. Decorators implementing decor.Wrapper -// interface are unwrapped first. +// TraverseDecorators traverses available decorators and calls `cb` +// on each unwrapped one. func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { select { case b.operateState <- func(s *bState) { - var wg sync.WaitGroup - for _, decorators := range s.decorators { - wg.Add(len(decorators)) - for _, d := range decorators { - d := d - go func() { - cb(unwrap(d)) - wg.Done() - }() + for _, group := range s.decorGroups { + for _, d := range group { + cb(unwrap(d)) } } - wg.Wait() }: case <-b.ctx.Done(): } } // EnableTriggerComplete enables triggering complete event. It's effective -// only for bars which were constructed with `total <= 0`. If `curren >= total` +// only for bars which were constructed with `total <= 0`. If `current >= total` // at the moment of call, complete event is triggered right away. func (b *Bar) EnableTriggerComplete() { select { @@ -283,10 +269,10 @@ func (b *Bar) EwmaIncrInt64(n int64, iterDur time.Duration) { var wg sync.WaitGroup wg.Add(len(s.ewmaDecorators)) for _, d := range s.ewmaDecorators { - d := d + // d := d // NOTE: uncomment for Go < 1.22, see /doc/faq#closures_and_goroutines go func() { + defer wg.Done() d.EwmaUpdate(n, iterDur) - wg.Done() }() } s.current += n @@ -312,10 +298,10 @@ func (b *Bar) EwmaSetCurrent(current int64, iterDur time.Duration) { var wg sync.WaitGroup wg.Add(len(s.ewmaDecorators)) for _, d := range s.ewmaDecorators { - d := d + // d := d // NOTE: uncomment for Go < 1.22, see /doc/faq#closures_and_goroutines go func() { + defer wg.Done() d.EwmaUpdate(n, iterDur) - wg.Done() }() } s.current = current @@ -402,13 +388,14 @@ func (b *Bar) Wait() { } func (b *Bar) serve(bs *bState) { - decoratorsOnShutdown := func(decorators []decor.Decorator) { - for _, d := range decorators { + defer b.container.bwg.Done() + decoratorsOnShutdown := func(group []decor.Decorator) { + for _, d := range group { if d, ok := unwrap(d).(decor.ShutdownListener); ok { b.container.bwg.Add(1) go func() { + defer b.container.bwg.Done() d.OnShutdown() - b.container.bwg.Done() }() } } @@ -418,13 +405,12 @@ func (b *Bar) serve(bs *bState) { case op := <-b.operateState: op(bs) case <-b.ctx.Done(): - decoratorsOnShutdown(bs.decorators[0]) - decoratorsOnShutdown(bs.decorators[1]) + decoratorsOnShutdown(bs.decorGroups[0]) + decoratorsOnShutdown(bs.decorGroups[1]) // bar can be aborted by canceling parent ctx without calling b.Abort bs.aborted = !bs.completed() b.bs = bs close(b.bsOk) - b.container.bwg.Done() return } } @@ -491,9 +477,9 @@ func (b *Bar) wSyncTable() syncTable { } func (s *bState) draw(stat decor.Statistics) (_ io.Reader, err error) { - decorFiller := func(buf *bytes.Buffer, decorators []decor.Decorator) (err error) { - for _, d := range decorators { - // need to call Decor in any case 
becase of width synchronization + decorFiller := func(buf *bytes.Buffer, group []decor.Decorator) (err error) { + for _, d := range group { + // need to call Decor in any case because of width synchronization str, width := d.Decor(stat) if err != nil { continue @@ -511,7 +497,7 @@ func (s *bState) draw(stat decor.Statistics) (_ io.Reader, err error) { } for i, buf := range s.buffers[:2] { - err = decorFiller(buf, s.decorators[i]) + err = decorFiller(buf, s.decorGroups[i]) if err != nil { return nil, err } @@ -545,34 +531,20 @@ func (s *bState) draw(stat decor.Statistics) (_ io.Reader, err error) { } func (s *bState) wSyncTable() (table syncTable) { - var count int + var start int var row []chan int - for i, decorators := range s.decorators { - for _, d := range decorators { + for i, group := range s.decorGroups { + for _, d := range group { if ch, ok := d.Sync(); ok { row = append(row, ch) - count++ } } - switch i { - case 0: - table[i] = row[0:count] - default: - table[i] = row[len(table[i-1]):count] - } + table[i], start = row[start:], len(row) } return table } -func (s *bState) populateEwmaDecorators(decorators []decor.Decorator) { - for _, d := range decorators { - if d, ok := unwrap(d).(decor.EwmaDecorator); ok { - s.ewmaDecorators = append(s.ewmaDecorators, d) - } - } -} - func (s *bState) triggerCompletion(b *Bar) { s.triggerComplete = true if s.autoRefresh { diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go index 7a036d98..4dca113d 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go @@ -10,15 +10,15 @@ import ( const ( iLbound = iota - iRbound iRefiller iFiller iTip iPadding - components + iRbound + iLen ) -var defaultBarStyle = [components]string{"[", "]", "+", "=", ">", "-"} +var defaultBarStyle = [iLen]string{"[", "+", "=", ">", "-", "]"} // BarStyleComposer interface. 
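+// A typical composition, written from a caller's perspective (sketch
+// only; the style strings are arbitrary):
+//
+//	mpb.BarStyle().Lbound("[").Filler("=").Tip(">").Padding("-").Rbound("]").Build()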
type BarStyleComposer interface { @@ -44,15 +44,17 @@ type component struct { bytes []byte } -type flushSection struct { - meta func(io.Writer, []byte) error +type barSection struct { + meta func(string) string bytes []byte } -type bFiller struct { - components [components]component - meta [components]func(io.Writer, []byte) error - flush func(io.Writer, ...flushSection) error +type barSections [iLen]barSection + +type barFiller struct { + components [iLen]component + metas [iLen]func(string) string + flushOp func(barSections, io.Writer) error tip struct { onComplete bool count uint @@ -61,8 +63,8 @@ type bFiller struct { } type barStyle struct { - style [components]string - metaFuncs [components]func(io.Writer, []byte) error + style [iLen]string + metas [iLen]func(string) string tipFrames []string tipOnComplete bool rev bool @@ -75,9 +77,6 @@ func BarStyle() BarStyleComposer { style: defaultBarStyle, tipFrames: []string{defaultBarStyle[iTip]}, } - for i := range bs.metaFuncs { - bs.metaFuncs[i] = defaultMeta - } return bs } @@ -87,7 +86,7 @@ func (s barStyle) Lbound(bound string) BarStyleComposer { } func (s barStyle) LboundMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iLbound] = makeMetaFunc(fn) + s.metas[iLbound] = fn return s } @@ -97,7 +96,7 @@ func (s barStyle) Rbound(bound string) BarStyleComposer { } func (s barStyle) RboundMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iRbound] = makeMetaFunc(fn) + s.metas[iRbound] = fn return s } @@ -107,7 +106,7 @@ func (s barStyle) Filler(filler string) BarStyleComposer { } func (s barStyle) FillerMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iFiller] = makeMetaFunc(fn) + s.metas[iFiller] = fn return s } @@ -117,7 +116,7 @@ func (s barStyle) Refiller(refiller string) BarStyleComposer { } func (s barStyle) RefillerMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iRefiller] = makeMetaFunc(fn) + s.metas[iRefiller] = fn return s } @@ -127,7 +126,7 @@ func (s barStyle) Padding(padding string) BarStyleComposer { } func (s barStyle) PaddingMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iPadding] = makeMetaFunc(fn) + s.metas[iPadding] = fn return s } @@ -139,7 +138,7 @@ func (s barStyle) Tip(frames ...string) BarStyleComposer { } func (s barStyle) TipMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iTip] = makeMetaFunc(fn) + s.metas[iTip] = fn return s } @@ -154,9 +153,7 @@ func (s barStyle) Reverse() BarStyleComposer { } func (s barStyle) Build() BarFiller { - bf := &bFiller{ - meta: s.metaFuncs, - } + bf := &barFiller{metas: s.metas} bf.components[iLbound] = component{ width: runewidth.StringWidth(s.style[iLbound]), bytes: []byte(s.style[iLbound]), @@ -178,42 +175,22 @@ func (s barStyle) Build() BarFiller { bytes: []byte(s.style[iPadding]), } bf.tip.onComplete = s.tipOnComplete - bf.tip.frames = make([]component, len(s.tipFrames)) - for i, t := range s.tipFrames { - bf.tip.frames[i] = component{ + bf.tip.frames = make([]component, 0, len(s.tipFrames)) + for _, t := range s.tipFrames { + bf.tip.frames = append(bf.tip.frames, component{ width: runewidth.StringWidth(t), bytes: []byte(t), - } + }) } if s.rev { - bf.flush = func(w io.Writer, sections ...flushSection) error { - for i := len(sections) - 1; i >= 0; i-- { - if s := sections[i]; len(s.bytes) != 0 { - err := s.meta(w, s.bytes) - if err != nil { - return err - } - } - } - return nil - } + bf.flushOp = barSections.flushRev } else { - bf.flush = func(w io.Writer, sections ...flushSection) error { - for _, s := 
range sections { - if len(s.bytes) != 0 { - err := s.meta(w, s.bytes) - if err != nil { - return err - } - } - } - return nil - } + bf.flushOp = barSections.flush } return bf } -func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { +func (s *barFiller) Fill(w io.Writer, stat decor.Statistics) error { width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) // don't count brackets as progress width -= (s.components[iLbound].width + s.components[iRbound].width) @@ -221,15 +198,6 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { return nil } - err := s.meta[iLbound](w, s.components[iLbound].bytes) - if err != nil { - return err - } - - if width == 0 { - return s.meta[iRbound](w, s.components[iRbound].bytes) - } - var tip component var refilling, filling, padding []byte var fillCount int @@ -265,26 +233,42 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { padding = append(padding, "…"...) } - err = s.flush(w, - flushSection{s.meta[iRefiller], refilling}, - flushSection{s.meta[iFiller], filling}, - flushSection{s.meta[iTip], tip.bytes}, - flushSection{s.meta[iPadding], padding}, - ) - if err != nil { - return err + return s.flushOp(barSections{ + {s.metas[iLbound], s.components[iLbound].bytes}, + {s.metas[iRefiller], refilling}, + {s.metas[iFiller], filling}, + {s.metas[iTip], tip.bytes}, + {s.metas[iPadding], padding}, + {s.metas[iRbound], s.components[iRbound].bytes}, + }, w) +} + +func (s barSection) flush(w io.Writer) (err error) { + if s.meta != nil { + _, err = io.WriteString(w, s.meta(string(s.bytes))) + } else { + _, err = w.Write(s.bytes) } - return s.meta[iRbound](w, s.components[iRbound].bytes) + return err } -func makeMetaFunc(fn func(string) string) func(io.Writer, []byte) error { - return func(w io.Writer, p []byte) (err error) { - _, err = io.WriteString(w, fn(string(p))) - return err +func (bb barSections) flush(w io.Writer) error { + for _, s := range bb { + err := s.flush(w) + if err != nil { + return err + } } + return nil } -func defaultMeta(w io.Writer, p []byte) (err error) { - _, err = w.Write(p) - return err +func (bb barSections) flushRev(w io.Writer) error { + bb[0], bb[len(bb)-1] = bb[len(bb)-1], bb[0] + for i := len(bb) - 1; i >= 0; i-- { + err := bb[i].flush(w) + if err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go index c9fd463e..56075810 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go @@ -24,7 +24,7 @@ type SpinnerStyleComposer interface { Meta(func(string) string) SpinnerStyleComposer } -type sFiller struct { +type spinnerFiller struct { frames []string count uint meta func(string) string @@ -40,9 +40,7 @@ type spinnerStyle struct { // SpinnerStyle constructs default spinner style which can be altered via // SpinnerStyleComposer interface. 
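+// For example (sketch; the frames are arbitrary):
+//
+//	mpb.SpinnerStyle("◐", "◓", "◑", "◒").Meta(func(s string) string { return s })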
func SpinnerStyle(frames ...string) SpinnerStyleComposer { - ss := spinnerStyle{ - meta: func(s string) string { return s }, - } + var ss spinnerStyle if len(frames) != 0 { ss.frames = frames } else { @@ -67,10 +65,7 @@ func (s spinnerStyle) Meta(fn func(string) string) SpinnerStyleComposer { } func (s spinnerStyle) Build() BarFiller { - sf := &sFiller{ - frames: s.frames, - meta: s.meta, - } + sf := &spinnerFiller{frames: s.frames} switch s.position { case positionLeft: sf.position = func(frame string, padWidth int) string { @@ -85,10 +80,15 @@ func (s spinnerStyle) Build() BarFiller { return strings.Repeat(" ", padWidth/2) + frame + strings.Repeat(" ", padWidth/2+padWidth%2) } } + if s.meta != nil { + sf.meta = s.meta + } else { + sf.meta = func(s string) string { return s } + } return sf } -func (s *sFiller) Fill(w io.Writer, stat decor.Statistics) error { +func (s *spinnerFiller) Fill(w io.Writer, stat decor.Statistics) error { width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) frame := s.frames[s.count%uint(len(s.frames))] frameWidth := runewidth.StringWidth(frame) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_option.go b/vendor/github.com/vbauerster/mpb/v8/bar_option.go index 6247a334..6c1b7e6f 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_option.go @@ -10,31 +10,29 @@ import ( // BarOption is a func option to alter default behavior of a bar. type BarOption func(*bState) -func inspect(decorators []decor.Decorator) (dest []decor.Decorator) { +// PrependDecorators let you inject decorators to the bar's left side. +func PrependDecorators(decorators ...decor.Decorator) BarOption { + var group []decor.Decorator for _, decorator := range decorators { - if decorator == nil { - continue + if decorator != nil { + group = append(group, decorator) } - dest = append(dest, decorator) } - return -} - -// PrependDecorators let you inject decorators to the bar's left side. -func PrependDecorators(decorators ...decor.Decorator) BarOption { - decorators = inspect(decorators) return func(s *bState) { - s.populateEwmaDecorators(decorators) - s.decorators[0] = decorators + s.decorGroups[0] = group } } // AppendDecorators let you inject decorators to the bar's right side. func AppendDecorators(decorators ...decor.Decorator) BarOption { - decorators = inspect(decorators) + var group []decor.Decorator + for _, decorator := range decorators { + if decorator != nil { + group = append(group, decorator) + } + } return func(s *bState) { - s.populateEwmaDecorators(decorators) - s.decorators[1] = decorators + s.decorGroups[1] = group } } @@ -88,6 +86,25 @@ func BarFillerOnComplete(message string) BarOption { }) } +// BarFillerClearOnAbort clears bar's filler on abort event. +// It's shortcut for BarFillerOnAbort(""). +func BarFillerClearOnAbort() BarOption { + return BarFillerOnAbort("") +} + +// BarFillerOnAbort replaces bar's filler with message, on abort event. +func BarFillerOnAbort(message string) BarOption { + return BarFillerMiddleware(func(base BarFiller) BarFiller { + return BarFillerFunc(func(w io.Writer, st decor.Statistics) error { + if st.Aborted { + _, err := io.WriteString(w, message) + return err + } + return base.Fill(w, st) + }) + }) +} + // BarFillerMiddleware provides a way to augment the underlying BarFiller. 
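+// For example, a middleware that replaces the filler with a plain
+// message once the bar is aborted, mirroring BarFillerOnAbort above
+// (sketch only; the message is arbitrary):
+//
+//	mpb.BarFillerMiddleware(func(base mpb.BarFiller) mpb.BarFiller {
+//		return mpb.BarFillerFunc(func(w io.Writer, st decor.Statistics) error {
+//			if st.Aborted {
+//				_, err := io.WriteString(w, "aborted")
+//				return err
+//			}
+//			return base.Fill(w, st)
+//		})
+//	})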
func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption { if middle == nil { diff --git a/vendor/github.com/vbauerster/mpb/v8/container_option.go b/vendor/github.com/vbauerster/mpb/v8/container_option.go index 177620e0..85e12f22 100644 --- a/vendor/github.com/vbauerster/mpb/v8/container_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/container_option.go @@ -30,6 +30,17 @@ func WithWidth(width int) ContainerOption { } } +// WithQueueLen sets buffer size of heap manager channel. Ideally it must be +// kept at MAX value, where MAX is number of bars to be rendered at the same +// time. If len < MAX then backpressure to the scheduler will be increased as +// MAX-len extra goroutines will be launched at each render cycle. +// Default queue len is 128. +func WithQueueLen(len int) ContainerOption { + return func(s *pState) { + s.hmQueueLen = len + } +} + // WithRefreshRate overrides default 150ms refresh rate. func WithRefreshRate(d time.Duration) ContainerOption { return func(s *pState) { diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index bf48d3ee..d8fcc2b9 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -15,7 +15,7 @@ var ( _ AverageDecorator = (*averageETA)(nil) ) -// TimeNormalizer interface. Implementors could be passed into +// TimeNormalizer interface. Implementers could be passed into // MovingAverageETA, in order to affect i.e. normalize its output. type TimeNormalizer interface { Normalize(time.Duration) time.Duration diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go index d9950b61..90ecda68 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go @@ -1,3 +1,6 @@ +//go:generate go tool stringer -type=SizeB1024 -trimprefix=_i +//go:generate go tool stringer -type=SizeB1000 -trimprefix=_ + package decor import ( @@ -5,9 +8,6 @@ import ( "strconv" ) -//go:generate stringer -type=SizeB1024 -trimprefix=_i -//go:generate stringer -type=SizeB1000 -trimprefix=_ - var ( _ fmt.Formatter = SizeB1024(0) _ fmt.Stringer = SizeB1024(0) @@ -28,14 +28,14 @@ const ( // appropriate size marker (KiB, MiB, GiB, TiB). type SizeB1024 int64 -func (self SizeB1024) Format(st fmt.State, verb rune) { +func (s SizeB1024) Format(f fmt.State, verb rune) { prec := -1 switch verb { case 'f', 'e', 'E': prec = 6 // default prec of fmt.Printf("%f|%e|%E") fallthrough case 'b', 'g', 'G', 'x', 'X': - if p, ok := st.Precision(); ok { + if p, ok := f.Precision(); ok { prec = p } default: @@ -44,24 +44,24 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { var unit SizeB1024 switch { - case self < _iKiB: + case s < _iKiB: unit = _ib - case self < _iMiB: + case s < _iMiB: unit = _iKiB - case self < _iGiB: + case s < _iGiB: unit = _iMiB - case self < _iTiB: + case s < _iTiB: unit = _iGiB default: unit = _iTiB } - b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64) - if st.Flag(' ') { + b := strconv.AppendFloat(make([]byte, 0, 24), float64(s)/float64(unit), byte(verb), prec, 64) + if f.Flag(' ') { b = append(b, ' ') } b = append(b, []byte(unit.String())...) - _, err := st.Write(b) + _, err := f.Write(b) if err != nil { panic(err) } @@ -80,14 +80,14 @@ const ( // appropriate size marker (KB, MB, GB, TB). 
type SizeB1000 int64 -func (self SizeB1000) Format(st fmt.State, verb rune) { +func (s SizeB1000) Format(f fmt.State, verb rune) { prec := -1 switch verb { case 'f', 'e', 'E': prec = 6 // default prec of fmt.Printf("%f|%e|%E") fallthrough case 'b', 'g', 'G', 'x', 'X': - if p, ok := st.Precision(); ok { + if p, ok := f.Precision(); ok { prec = p } default: @@ -96,24 +96,24 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { var unit SizeB1000 switch { - case self < _KB: + case s < _KB: unit = _b - case self < _MB: + case s < _MB: unit = _KB - case self < _GB: + case s < _GB: unit = _MB - case self < _TB: + case s < _TB: unit = _GB default: unit = _TB } - b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64) - if st.Flag(' ') { + b := strconv.AppendFloat(make([]byte, 0, 24), float64(s)/float64(unit), byte(verb), prec, 64) + if f.Flag(' ') { b = append(b, ' ') } b = append(b, []byte(unit.String())...) - _, err := st.Write(b) + _, err := f.Write(b) if err != nil { panic(err) } diff --git a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go index a680187b..88efb482 100644 --- a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go +++ b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go @@ -10,7 +10,6 @@ const ( h_sync heapCmd = iota h_push h_iter - h_drain h_fix h_state h_end @@ -22,8 +21,9 @@ type heapRequest struct { } type iterData struct { - iter chan<- *Bar - drop <-chan struct{} + drop <-chan struct{} + iter chan<- *Bar + iterPop chan<- *Bar } type pushData struct { @@ -49,9 +49,7 @@ func (m heapManager) run() { case h_push: data := req.data.(pushData) heap.Push(&bHeap, data.bar) - if !sync { - sync = data.sync - } + sync = sync || data.sync case h_sync: if sync || l != bHeap.Len() { pMatrix = make(map[int][]chan int) @@ -73,26 +71,30 @@ func (m heapManager) run() { syncWidth(aMatrix, drop) case h_iter: data := req.data.(iterData) - drop_iter: + loop: // unordered iteration for _, b := range bHeap { select { case data.iter <- b: case <-data.drop: - break drop_iter + data.iterPop = nil + break loop } } close(data.iter) - case h_drain: - data := req.data.(iterData) - drop_drain: + if data.iterPop == nil { + break + } + loop_pop: // ordered iteration for bHeap.Len() != 0 { + bar := heap.Pop(&bHeap).(*Bar) select { - case data.iter <- heap.Pop(&bHeap).(*Bar): + case data.iterPop <- bar: case <-data.drop: - break drop_drain + heap.Push(&bHeap, bar) + break loop_pop } } - close(data.iter) + close(data.iterPop) case h_fix: data := req.data.(fixData) if data.bar.index < 0 { @@ -123,19 +125,21 @@ func (m heapManager) sync(drop <-chan struct{}) { func (m heapManager) push(b *Bar, sync bool) { data := pushData{b, sync} - m <- heapRequest{cmd: h_push, data: data} + req := heapRequest{cmd: h_push, data: data} + select { + case m <- req: + default: + go func() { + m <- req + }() + } } -func (m heapManager) iter(iter chan<- *Bar, drop <-chan struct{}) { - data := iterData{iter, drop} +func (m heapManager) iter(drop <-chan struct{}, iter, iterPop chan<- *Bar) { + data := iterData{drop, iter, iterPop} m <- heapRequest{cmd: h_iter, data: data} } -func (m heapManager) drain(iter chan<- *Bar, drop <-chan struct{}) { - data := iterData{iter, drop} - m <- heapRequest{cmd: h_drain, data: data} -} - func (m heapManager) fix(b *Bar, priority int, lazy bool) { data := fixData{b, priority, lazy} m <- heapRequest{cmd: h_fix, data: data} diff --git a/vendor/github.com/vbauerster/mpb/v8/priority_queue.go 
b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go index 0863b578..c2f657db 100644 --- a/vendor/github.com/vbauerster/mpb/v8/priority_queue.go +++ b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go @@ -20,18 +20,18 @@ func (pq priorityQueue) Swap(i, j int) { } func (pq *priorityQueue) Push(x interface{}) { - n := len(*pq) - bar := x.(*Bar) - bar.index = n - *pq = append(*pq, bar) + s := *pq + b := x.(*Bar) + b.index = len(s) + *pq = append(s, b) } func (pq *priorityQueue) Pop() interface{} { - old := *pq - n := len(old) - bar := old[n-1] - old[n-1] = nil // avoid memory leak - bar.index = -1 // for safety - *pq = old[:n-1] - return bar + var b *Bar + s := *pq + i := len(s) - 1 + b, s[i] = s[i], nil // nil to avoid memory leak + b.index = -1 // for safety + *pq = s[:i] + return b } diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index 5c57eafa..851083c4 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -15,9 +15,10 @@ import ( ) const defaultRefreshRate = 150 * time.Millisecond +const defaultHmQueueLength = 128 -// DoneError represents use after `(*Progress).Wait()` error. -var DoneError = fmt.Errorf("%T instance can't be reused after %[1]T.Wait()", (*Progress)(nil)) +// ErrDone represents use after `(*Progress).Wait()` error. +var ErrDone = fmt.Errorf("%T instance can't be reused after %[1]T.Wait()", (*Progress)(nil)) // Progress represents a container that renders one or more progress bars. type Progress struct { @@ -31,16 +32,17 @@ type Progress struct { // pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. type pState struct { - ctx context.Context - hm heapManager - dropS, dropD chan struct{} - renderReq chan time.Time - idCount int - popPriority int - - // following are provided/overrided by user - refreshRate time.Duration + ctx context.Context + hm heapManager + iterDrop chan struct{} + renderReq chan time.Time + idCount int + popPriority int + + // following are provided/overrode by user + hmQueueLen int reqWidth int + refreshRate time.Duration popCompleted bool autoRefresh bool delayRC <-chan struct{} @@ -68,9 +70,8 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { ctx, cancel := context.WithCancel(ctx) s := &pState{ ctx: ctx, - hm: make(heapManager), - dropS: make(chan struct{}), - dropD: make(chan struct{}), + hmQueueLen: defaultHmQueueLength, + iterDrop: make(chan struct{}), renderReq: make(chan time.Time), popPriority: math.MinInt32, refreshRate: defaultRefreshRate, @@ -85,6 +86,8 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { } } + s.hm = make(heapManager, s.hmQueueLen) + p := &Progress{ uwg: s.uwg, operateState: make(chan func(*pState)), @@ -146,7 +149,7 @@ func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption) // Add creates a bar which renders itself by provided BarFiller. // If `total <= 0` triggering complete event by increment methods // is disabled. If called after `(*Progress).Wait()` then -// `(nil, DoneError)` is returned. +// `(nil, ErrDone)` is returned. 
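+// Reuse after Wait can be detected with errors.Is (illustrative sketch):
+//
+//	if _, err := p.Add(total, mpb.BarStyle().Build()); errors.Is(err, mpb.ErrDone) {
+//		// this Progress instance has already been waited on
+//	}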
func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Bar, error) { if filler == nil { filler = NopStyle().Build() @@ -168,14 +171,14 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Ba }: return <-ch, nil case <-p.done: - return nil, DoneError + return nil, ErrDone } } func (p *Progress) traverseBars(cb func(b *Bar) bool) { - iter, drop := make(chan *Bar), make(chan struct{}) + drop, iter := make(chan struct{}), make(chan *Bar) select { - case p.operateState <- func(s *pState) { s.hm.iter(iter, drop) }: + case p.operateState <- func(s *pState) { s.hm.iter(drop, iter, nil) }: for b := range iter { if !cb(b) { close(drop) @@ -202,8 +205,7 @@ func (p *Progress) UpdateBarPriority(b *Bar, priority int, lazy bool) { // Write is implementation of io.Writer. // Writing to `*Progress` will print lines above a running bar. // Writes aren't flushed immediately, but at next refresh cycle. -// If called after `(*Progress).Wait()` then `(0, DoneError)` -// is returned. +// If called after `(*Progress).Wait()` then `(0, ErrDone)` is returned. func (p *Progress) Write(b []byte) (int, error) { type result struct { n int @@ -218,7 +220,7 @@ func (p *Progress) Write(b []byte) (int, error) { res := <-ch return res.n, res.err case <-p.done: - return 0, DoneError + return 0, ErrDone } } @@ -333,15 +335,15 @@ func (s *pState) manualRefreshListener(done chan struct{}) { } func (s *pState) render(cw *cwriter.Writer) (err error) { - s.hm.sync(s.dropS) - iter := make(chan *Bar) - go s.hm.iter(iter, s.dropS) + iter, iterPop := make(chan *Bar), make(chan *Bar) + s.hm.sync(s.iterDrop) + s.hm.iter(s.iterDrop, iter, iterPop) var width, height int if cw.IsTerminal() { width, height, err = cw.GetTermSize() if err != nil { - close(s.dropS) + close(s.iterDrop) return err } } else { @@ -353,39 +355,36 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { height = width } + var barCount int for b := range iter { + barCount++ go b.render(width) } - return s.flush(cw, height) + return s.flush(cw, height, barCount, iterPop) } -func (s *pState) flush(cw *cwriter.Writer, height int) error { - var wg sync.WaitGroup - defer wg.Wait() // waiting for all s.push to complete - - var popCount int - var rows []io.Reader - - iter := make(chan *Bar) - s.hm.drain(iter, s.dropD) +func (s *pState) flush(cw *cwriter.Writer, height, barCount int, iter <-chan *Bar) error { + var total, popCount int + rows := make([][]io.Reader, 0, barCount) for b := range iter { frame := <-b.frameCh if frame.err != nil { - close(s.dropD) + close(s.iterDrop) b.cancel() return frame.err // b.frameCh is buffered it's ok to return here } - var usedRows int + var discarded int for i := len(frame.rows) - 1; i >= 0; i-- { - if row := frame.rows[i]; len(rows) < height { - rows = append(rows, row) - usedRows++ + if total < height { + total++ } else { - _, _ = io.Copy(io.Discard, row) + _, _ = io.Copy(io.Discard, frame.rows[i]) // Found IsInBounds + discarded++ } } + rows = append(rows, frame.rows) switch frame.shutdown { case 1: @@ -393,42 +392,35 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error { if qb, ok := s.queueBars[b]; ok { delete(s.queueBars, b) qb.priority = b.priority - wg.Add(1) - go s.push(&wg, qb, true) + s.hm.push(qb, true) } else if s.popCompleted && !frame.noPop { b.priority = s.popPriority s.popPriority++ - wg.Add(1) - go s.push(&wg, b, false) + s.hm.push(b, false) } else if !frame.rmOnComplete { - wg.Add(1) - go s.push(&wg, b, false) + s.hm.push(b, false) } case 2: if 
s.popCompleted && !frame.noPop { - popCount += usedRows + popCount += len(frame.rows) - discarded continue } fallthrough default: - wg.Add(1) - go s.push(&wg, b, false) + s.hm.push(b, false) } } for i := len(rows) - 1; i >= 0; i-- { - _, err := cw.ReadFrom(rows[i]) - if err != nil { - return err + for _, r := range rows[i] { + _, err := cw.ReadFrom(r) + if err != nil { + return err + } } } - return cw.Flush(len(rows) - popCount) -} - -func (s *pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { - s.hm.push(b, sync) - wg.Done() + return cw.Flush(total - popCount) } func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { @@ -455,6 +447,14 @@ func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption } } + for _, group := range bs.decorGroups { + for _, d := range group { + if d, ok := unwrap(d).(decor.EwmaDecorator); ok { + bs.ewmaDecorators = append(bs.ewmaDecorators, d) + } + } + } + bs.buffers[0] = bytes.NewBuffer(make([]byte, 0, 128)) // prepend bs.buffers[1] = bytes.NewBuffer(make([]byte, 0, 128)) // append bs.buffers[2] = bytes.NewBuffer(make([]byte, 0, 256)) // filler diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.mongodb.org/mongo-driver/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go deleted file mode 100644 index a0d81858..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer -// See THIRD-PARTY-NOTICES for original license terms. - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. 
-type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. -// -// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D = primitive.D - -// E represents a BSON element for a D. It is usually used inside a D. -type E = primitive.E - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. -// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M = primitive.M - -// An A is an ordered representation of a BSON array. -// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go deleted file mode 100644 index 652aa48b..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ArrayCodec is the Codec used for bsoncore.Array values. -// -// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. -type ArrayCodec struct{} - -var defaultArrayCodec = NewArrayCodec() - -// NewArrayCodec returns an ArrayCodec. -// -// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See -// [ArrayCodec] for more details. -func NewArrayCodec() *ArrayCodec { - return &ArrayCodec{} -} - -// EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreArray { - return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - arr := val.Interface().(bsoncore.Array) - return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) -} - -// DecodeValue is the ValueDecoder for bsoncore.Array values. 
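The bson.D and bson.M doc comments in the removed file distinguish ordered from unordered documents. A small sketch of that difference, assuming the v1 go.mongodb.org/mongo-driver/bson API that this diff drops from the vendor tree:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
    )

    func main() {
        // D preserves element order, which matters for command documents.
        cmd := bson.D{{"ping", 1}}
        // M is an ordinary map; element order is undefined when encoded.
        filter := bson.M{"status": "active", "age": bson.M{"$gt": 21}}

        for _, doc := range []interface{}{cmd, filter} {
            raw, err := bson.Marshal(doc)
            if err != nil {
                panic(err)
            }
            fmt.Println(bson.Raw(raw)) // bson.Raw prints extended JSON via its String method
        }
    }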
-func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreArray { - return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) - val.Set(reflect.ValueOf(arr)) - return err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go deleted file mode 100644 index 0693bd43..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" - -import ( - "fmt" - "reflect" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ( - emptyValue = reflect.Value{} -) - -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. -type Marshaler interface { - MarshalBSON() ([]byte, error) -} - -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. -type ValueMarshaler interface { - MarshalBSONValue() (bsontype.Type, []byte, error) -} - -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. -type Unmarshaler interface { - UnmarshalBSON([]byte) error -} - -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. -type ValueUnmarshaler interface { - UnmarshalBSONValue(bsontype.Type, []byte) error -} - -// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be -// encoded by the ValueEncoder. 
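Marshaler and ValueMarshaler above are the hooks the codec machinery checks before falling back to reflection. A hedged sketch of a type implementing ValueMarshaler with the bsontype and bsoncore helpers that ship alongside this driver version; the Celsius type is illustrative only:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsontype"
        "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
    )

    // Celsius marshals itself as a BSON string such as "21.5C".
    type Celsius float64

    func (c Celsius) MarshalBSONValue() (bsontype.Type, []byte, error) {
        return bsontype.String, bsoncore.AppendString(nil, fmt.Sprintf("%gC", float64(c))), nil
    }

    func main() {
        out, err := bson.Marshal(bson.D{{"temp", Celsius(21.5)}})
        if err != nil {
            panic(err)
        }
        fmt.Println(bson.Raw(out)) // {"temp": "21.5C"}
    }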
-type ValueEncoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vee ValueEncoderError) Error() string { - typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) - for _, t := range vee.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vee.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vee.Received.Kind().String() - if vee.Received.IsValid() { - received = vee.Received.Type().String() - } - return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) -} - -// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be -// decoded by the ValueDecoder. -type ValueDecoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vde ValueDecoderError) Error() string { - typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) - for _, t := range vde.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vde.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vde.Received.Kind().String() - if vde.Received.IsValid() { - received = vde.Received.Type().String() - } - return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) -} - -// EncodeContext is the contextual information required for a Codec to encode a -// value. -type EncodeContext struct { - *Registry - - // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, - // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) - // that can represent the integer value. - // - // Deprecated: Use bson.Encoder.IntMinSize instead. - MinSize bool - - errorOnInlineDuplicates bool - stringifyMapKeysWithFmt bool - nilMapAsEmpty bool - nilSliceAsEmpty bool - nilByteSliceAsEmpty bool - omitZeroStruct bool - useJSONStructTags bool -} - -// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in -// the marshaled BSON when the "inline" struct tag option is set. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. -func (ec *EncodeContext) ErrorOnInlineDuplicates() { - ec.errorOnInlineDuplicates = true -} - -// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name -// strings using fmt.Sprintf() instead of the default string conversion logic. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. -func (ec *EncodeContext) StringifyMapKeysWithFmt() { - ec.stringifyMapKeysWithFmt = true -} - -// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (ec *EncodeContext) NilMapAsEmpty() { - ec.nilMapAsEmpty = true -} - -// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. 
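Each deprecated EncodeContext toggle above names a replacement method on bson.Encoder. A sketch of that replacement style, assuming the v1 bson.NewEncoder and bsonrw.NewBSONValueWriter APIs:

    package main

    import (
        "bytes"
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
        var buf bytes.Buffer
        vw, err := bsonrw.NewBSONValueWriter(&buf)
        if err != nil {
            panic(err)
        }
        enc, err := bson.NewEncoder(vw)
        if err != nil {
            panic(err)
        }
        enc.NilMapAsEmpty()   // encode nil maps as {} instead of null
        enc.NilSliceAsEmpty() // encode nil slices as [] instead of null

        type doc struct {
            Tags []string          `bson:"tags"`
            Meta map[string]string `bson:"meta"`
        }
        if err := enc.Encode(doc{}); err != nil {
            panic(err)
        }
        fmt.Println(bson.Raw(buf.Bytes()))
    }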
-func (ec *EncodeContext) NilSliceAsEmpty() { - ec.nilSliceAsEmpty = true -} - -// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values -// instead of BSON null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (ec *EncodeContext) NilByteSliceAsEmpty() { - ec.nilByteSliceAsEmpty = true -} - -// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) -// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. -// -// Note that the Encoder only examines exported struct fields when determining if a struct is the -// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. -func (ec *EncodeContext) OmitZeroStruct() { - ec.omitZeroStruct = true -} - -// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. -func (ec *EncodeContext) UseJSONStructTags() { - ec.useJSONStructTags = true -} - -// DecodeContext is the contextual information required for a Codec to decode a -// value. -type DecodeContext struct { - *Registry - - // Truncate, if true, instructs decoders to truncate the fractional part of BSON "double" - // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, - // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to - // BSON "decimal128" values. - // - // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. - Truncate bool - - // Ancestor is the type of a containing document. This is mainly used to determine what type - // should be used when decoding an embedded document into an empty interface. For example, if - // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface - // will be decoded into a bson.M. - // - // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. - Ancestor reflect.Type - - // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the - // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is - // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an - // error. DocumentType overrides the Ancestor field. - defaultDocumentType reflect.Type - - binaryAsSlice bool - useJSONStructTags bool - useLocalTimeZone bool - zeroMaps bool - zeroStructs bool -} - -// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or -// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. -func (dc *DecodeContext) BinaryAsSlice() { - dc.binaryAsSlice = true -} - -// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
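DefaultDocumentM and DefaultDocumentD above control what interface{} fields decode into. A sketch using the non-deprecated Decoder methods the comments point to:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
        raw, _ := bson.Marshal(bson.M{"nested": bson.M{"k": int32(1)}})

        dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
        if err != nil {
            panic(err)
        }
        dec.DefaultDocumentM() // nested documents decode as bson.M rather than bson.D

        var out map[string]interface{}
        if err := dec.Decode(&out); err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", out["nested"]) // primitive.M
    }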
-func (dc *DecodeContext) UseJSONStructTags() { - dc.useJSONStructTags = true -} - -// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead -// of the UTC timezone. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. -func (dc *DecodeContext) UseLocalTimeZone() { - dc.useLocalTimeZone = true -} - -// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value -// passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (dc *DecodeContext) ZeroMaps() { - dc.zeroMaps = true -} - -// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination -// value passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. -func (dc *DecodeContext) ZeroStructs() { - dc.zeroStructs = true -} - -// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. -func (dc *DecodeContext) DefaultDocumentM() { - dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) -} - -// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. -func (dc *DecodeContext) DefaultDocumentD() { - dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) -} - -// ValueCodec is an interface for encoding and decoding reflect.Value -// values. -// -// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. -type ValueCodec interface { - ValueEncoder - ValueDecoder -} - -// ValueEncoder is the interface implemented by types that can handle the encoding of a value. -type ValueEncoder interface { - EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error -} - -// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueEncoder. -type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error - -// EncodeValue implements the ValueEncoder interface. -func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - return fn(ec, vw, val) -} - -// ValueDecoder is the interface implemented by types that can handle the decoding of a value. -type ValueDecoder interface { - DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error -} - -// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueDecoder. -type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error - -// DecodeValue implements the ValueDecoder interface. -func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - return fn(dc, vr, val) -} - -// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.
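ValueEncoderFunc and ValueDecoderFunc above adapt plain functions into codecs. A hedged sketch registering a custom encoder for a user-defined type through the RegistryBuilder this package exposes; the secret type and redaction behavior are illustrative only:

    package main

    import (
        "fmt"
        "reflect"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsoncodec"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    type secret string

    // redactEncoder writes a fixed placeholder instead of the real value.
    func redactEncoder(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
        if !val.IsValid() || val.Type() != reflect.TypeOf(secret("")) {
            return bsoncodec.ValueEncoderError{Name: "redactEncoder", Types: []reflect.Type{reflect.TypeOf(secret(""))}, Received: val}
        }
        return vw.WriteString("[redacted]")
    }

    func main() {
        rb := bson.NewRegistryBuilder() // seeds the default encoders and decoders
        rb.RegisterTypeEncoder(reflect.TypeOf(secret("")), bsoncodec.ValueEncoderFunc(redactEncoder))
        reg := rb.Build()

        out, err := bson.MarshalWithRegistry(reg, bson.D{{"token", secret("hunter2")}})
        if err != nil {
            panic(err)
        }
        fmt.Println(bson.Raw(out)) // {"token": "[redacted]"}
    }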
-type typeDecoder interface { - decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) -} - -// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder. -type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) - -func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - return fn(dc, vr, t) -} - -// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder. -type decodeAdapter struct { - ValueDecoderFunc - typeDecoderFunc -} - -var _ ValueDecoder = decodeAdapter{} -var _ typeDecoder = decodeAdapter{} - -// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type -// t and calls decoder.DecodeValue on it. -func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - td, _ := decoder.(typeDecoder) - return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true) -} - -func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) { - if td != nil { - val, err := td.decodeType(dc, vr, t) - if err == nil && convert && val.Type() != t { - // This conversion step is necessary for slices and maps. If a user declares variables like: - // - // type myBool bool - // var m map[string]myBool - // - // and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present - // because we'll try to assign a value of type bool to one of type myBool. - val = val.Convert(t) - } - return val, err - } - - val := reflect.New(t).Elem() - err := vd.DecodeValue(dc, vr, val) - return val, err -} - -// CodecZeroer is the interface implemented by Codecs that can also determine if -// a value of the type that would be encoded is zero. -// -// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver -// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to -// nil instead. -type CodecZeroer interface { - IsTypeZero(interface{}) bool -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go deleted file mode 100644 index 0134b5a9..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ByteSliceCodec is the Codec used for []byte values. -// -// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver -// 2.0. To configure the byte slice encode and decode behavior, use the -// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder].
To configure the byte slice -// encode and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to encode nil byte slices as empty -// BSON binary values, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// NilByteSliceAsEmpty: true, -// }) -// -// See the deprecation notice for each field in ByteSliceCodec for the -// corresponding settings. -type ByteSliceCodec struct { - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values - // instead of BSON null. - // - // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty - // instead. - EncodeNilAsEmpty bool -} - -var ( - defaultByteSliceCodec = NewByteSliceCodec() - - // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultByteSliceCodec -) - -// NewByteSliceCodec returns a ByteSliceCodec with options opts. -// -// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See -// [ByteSliceCodec] for more details. -func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { - byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) - codec := ByteSliceCodec{} - if byteSliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for []byte. -func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tByteSlice { - return emptyValue, ValueDecoderError{ - Name: "ByteSliceDecodeValue", - Types: []reflect.Type{tByteSlice}, - Received: reflect.Zero(t), - } - } - - var data []byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - data = []byte(str) - case bsontype.Symbol: - sym, err := vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - data = []byte(sym) - case bsontype.Binary: - var subtype byte - data, subtype, err = vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} - } - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(data), nil -} - -// DecodeValue is the ValueDecoder for []byte. 
-func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - elem, err := bsc.decodeType(dc, vr, tByteSlice) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go deleted file mode 100644 index 844b5029..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "sync" - "sync/atomic" -) - -// Runtime check that the kind encoder and decoder caches can store any valid -// reflect.Kind constant. -func init() { - if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { - panic("The capacity of kindEncoderCache is too small.\n" + - "This is due to a new type being added to reflect.Kind.") - } -} - -// statically assert array size -var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] -var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] - -type typeEncoderCache struct { - cache sync.Map // map[reflect.Type]ValueEncoder -} - -func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { - c.cache.Store(rt, enc) -} - -func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueEncoder), true - } - return nil, false -} - -func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { - if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { - enc = v.(ValueEncoder) - } - return enc -} - -func (c *typeEncoderCache) Clone() *typeEncoderCache { - cc := new(typeEncoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -type typeDecoderCache struct { - cache sync.Map // map[reflect.Type]ValueDecoder -} - -func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { - c.cache.Store(rt, dec) -} - -func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueDecoder), true - } - return nil, false -} - -func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { - if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { - dec = v.(ValueDecoder) - } - return dec -} - -func (c *typeDecoderCache) Clone() *typeDecoderCache { - cc := new(typeDecoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -// atomic.Value requires that all calls to Store() have the same concrete type -// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueEncoder interface). 
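typeEncoderCache above is a thin typed wrapper over sync.Map, so concurrent lookups never race with registration. The same pattern in isolation, as a self-contained sketch (the encoder interface and strEnc type are illustrative):

    package main

    import (
        "fmt"
        "reflect"
        "sync"
    )

    type encoder interface{ Name() string }

    type typeCache struct {
        m sync.Map // map[reflect.Type]encoder
    }

    // LoadOrStore returns the existing encoder for rt if one is already
    // registered; otherwise it stores enc and returns it. Duplicate
    // concurrent registrations therefore converge on a single winner.
    func (c *typeCache) LoadOrStore(rt reflect.Type, enc encoder) encoder {
        if v, loaded := c.m.LoadOrStore(rt, enc); loaded {
            return v.(encoder)
        }
        return enc
    }

    type strEnc struct{}

    func (strEnc) Name() string { return "string-encoder" }

    func main() {
        var c typeCache
        e := c.LoadOrStore(reflect.TypeOf(""), strEnc{})
        fmt.Println(e.Name())
    }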
-type kindEncoderCacheEntry struct { - enc ValueEncoder -} - -type kindEncoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry -} - -func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { - if enc != nil && rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) - } -} - -func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { - return ent.enc, ent.enc != nil - } - } - return nil, false -} - -func (c *kindEncoderCache) Clone() *kindEncoderCache { - cc := new(kindEncoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} - -// atomic.Value requires that all calls to Store() have the same concrete type -// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueDecoder interface). -type kindDecoderCacheEntry struct { - dec ValueDecoder -} - -type kindDecoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry -} - -func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { - if rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) - } -} - -func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { - return ent.dec, ent.dec != nil - } - } - return nil, false -} - -func (c *kindDecoderCache) Clone() *kindDecoderCache { - cc := new(kindDecoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go deleted file mode 100644 index cb8180f2..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" -) - -// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. -type condAddrEncoder struct { - canAddrEnc ValueEncoder - elseEnc ValueEncoder -} - -var _ ValueEncoder = (*condAddrEncoder)(nil) - -// newCondAddrEncoder returns a condAddrEncoder. -func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { - encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} - return &encoder -} - -// EncodeValue is the ValueEncoderFunc for a value that may be addressable. -func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.CanAddr() { - return cae.canAddrEnc.EncodeValue(ec, vw, val) - } - if cae.elseEnc != nil { - return cae.elseEnc.EncodeValue(ec, vw, val) - } - return ErrNoEncoder{Type: val.Type()} -} - -// condAddrDecoder is the decoder used when a pointer to the value has a decoder.
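The kindEncoderCacheEntry wrapper above exists because atomic.Value panics if successive Store calls use different concrete types; boxing every encoder in one struct type keeps the stored type constant. The constraint in isolation:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type entry struct{ v interface{} }

    func main() {
        var slot atomic.Value
        slot.Store(&entry{v: "a string encoder"})
        slot.Store(&entry{v: 42}) // fine: the concrete type is always *entry

        // Storing raw values of different concrete types instead would
        // panic with "store of inconsistently typed value".
        fmt.Println(slot.Load().(*entry).v)
    }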
-type condAddrDecoder struct { - canAddrDec ValueDecoder - elseDec ValueDecoder -} - -var _ ValueDecoder = (*condAddrDecoder)(nil) - -// newCondAddrDecoder returns a condAddrDecoder. -func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { - decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} - return &decoder -} - -// DecodeValue is the ValueDecoderFunc for a value that may be addressable. -func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.CanAddr() { - return cad.canAddrDec.DecodeValue(dc, vr, val) - } - if cad.elseDec != nil { - return cad.elseDec.DecodeValue(dc, vr, val) - } - return ErrNoDecoder{Type: val.Type()} -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go deleted file mode 100644 index 7e08aab3..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ /dev/null @@ -1,1807 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "strconv" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var ( - defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") -) - -type decodeBinaryError struct { - subtype byte - typeName string -} - -func (d decodeBinaryError) Error() string { - return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype) -} - -func newDefaultStructCodec() *StructCodec { - codec, err := NewStructCodec(DefaultStructTagParser) - if err != nil { - // This function is called from the codec registration path, so errors can't be propagated. If there's an error - // constructing the StructCodec, we panic to avoid losing it. - panic(fmt.Errorf("error creating default StructCodec: %w", err)) - } - return codec -} - -// DefaultValueDecoders is a namespace type for the default ValueDecoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -type DefaultValueDecoders struct{} - -// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with -// the provided RegistryBuilder. -// -// There is no support for decoding map[string]interface{} because there is no decoder for -// interface{}, so users must either register this decoder themselves or use the -// EmptyInterfaceDecoder available in the bson package. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered.
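condAddrEncoder and condAddrDecoder above pick between two codecs based on reflect.Value.CanAddr, which is how pointer-receiver hooks get used whenever the value happens to be addressable. A standalone sketch of that dispatch:

    package main

    import (
        "fmt"
        "reflect"
    )

    func describe(val reflect.Value) string {
        if val.CanAddr() {
            // A pointer-receiver method set is reachable via val.Addr().
            return "addressable: " + val.Addr().Type().String()
        }
        return "not addressable: " + val.Type().String()
    }

    func main() {
        x := 7
        fmt.Println(describe(reflect.ValueOf(x)))         // not addressable
        fmt.Println(describe(reflect.ValueOf(&x).Elem())) // addressable
    }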
-func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) - } - - intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} - floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} - - rb. - RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). - RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). - RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). - RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). - RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). - RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). - RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). - RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). - RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). - RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). - RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). - RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). - RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeDecoder(tTime, defaultTimeCodec). - RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeDecoder(tCoreArray, defaultArrayCodec). - RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). - RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). - RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). - RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). - RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). - RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). - RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). - RegisterDefaultDecoder(reflect.Int, intDecoder). - RegisterDefaultDecoder(reflect.Int8, intDecoder). - RegisterDefaultDecoder(reflect.Int16, intDecoder). - RegisterDefaultDecoder(reflect.Int32, intDecoder). - RegisterDefaultDecoder(reflect.Int64, intDecoder). - RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Float32, floatDecoder). - RegisterDefaultDecoder(reflect.Float64, floatDecoder). - RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). - RegisterDefaultDecoder(reflect.Map, defaultMapCodec). - RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultDecoder(reflect.String, defaultStringCodec). - RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). - RegisterTypeMapEntry(bsontype.Double, tFloat64). - RegisterTypeMapEntry(bsontype.String, tString). - RegisterTypeMapEntry(bsontype.Array, tA). 
- RegisterTypeMapEntry(bsontype.Binary, tBinary). - RegisterTypeMapEntry(bsontype.Undefined, tUndefined). - RegisterTypeMapEntry(bsontype.ObjectID, tOID). - RegisterTypeMapEntry(bsontype.Boolean, tBool). - RegisterTypeMapEntry(bsontype.DateTime, tDateTime). - RegisterTypeMapEntry(bsontype.Regex, tRegex). - RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). - RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). - RegisterTypeMapEntry(bsontype.Symbol, tSymbol). - RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). - RegisterTypeMapEntry(bsontype.Int32, tInt32). - RegisterTypeMapEntry(bsontype.Int64, tInt64). - RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). - RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). - RegisterTypeMapEntry(bsontype.MinKey, tMinKey). - RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). - RegisterTypeMapEntry(bsontype.Type(0), tD). - RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). - RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). - RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) -} - -// DDecodeValue is the ValueDecoderFunc for primitive.D instances. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Type() != tD { - return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - dc.Ancestor = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a primitive.D", vrType) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return err - } - tEmptyTypeDecoder, _ := decoder.(typeDecoder) - - // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. - var elems primitive.D - if !val.IsNil() { - val.SetLen(0) - elems = val.Interface().(primitive.D) - } else { - elems = make(primitive.D, 0) - } - - for { - key, elemVr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } else if err != nil { - return err - } - - // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. 
- elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) - if err != nil { - return err - } - - elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) - } - - val.Set(reflect.ValueOf(elems)) - return nil -} - -func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.Bool { - return emptyValue, ValueDecoderError{ - Name: "BooleanDecodeValue", - Kinds: []reflect.Kind{reflect.Bool}, - Received: reflect.Zero(t), - } - } - - var b bool - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - b = (i32 != 0) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - b = (i64 != 0) - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - b = (f64 != 0) - case bsontype.Boolean: - b, err = vr.ReadBoolean() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(b), nil -} - -// BooleanDecodeValue is the ValueDecoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { - return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - - elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetBool(elem.Bool()) - return nil -} - -func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Int8: - if i64 < math.MinInt8 || i64 > math.MaxInt8 { - return emptyValue, fmt.Errorf("%d overflows int8", i64) - } - - return reflect.ValueOf(int8(i64)), nil - case reflect.Int16: - if i64 < math.MinInt16 || i64 > math.MaxInt16 { - return emptyValue, fmt.Errorf("%d overflows int16", i64) - } - - return reflect.ValueOf(int16(i64)), nil - case reflect.Int32: - if i64 < math.MinInt32 || i64 > math.MaxInt32 { - return 
emptyValue, fmt.Errorf("%d overflows int32", i64) - } - - return reflect.ValueOf(int32(i64)), nil - case reflect.Int64: - return reflect.ValueOf(i64), nil - case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int - return emptyValue, fmt.Errorf("%d overflows int", i64) - } - - return reflect.ValueOf(int(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: reflect.Zero(t), - } - } -} - -// IntDecodeValue is the ValueDecoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } - } - - elem, err := dvd.intDecodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetInt(elem.Int()) - return nil -} - -// UintDecodeValue is the ValueDecoderFunc for uint types. -// -// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var i64 int64 - var err error - switch vr.Type() { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") - } - if f64 > float64(math.MaxInt64) { - return fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return err - } - if b { - i64 = 1 - } - default: - return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) - } - - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - switch val.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return fmt.Errorf("%d overflows uint8", i64) - } - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return fmt.Errorf("%d overflows uint16", i64) - } - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return fmt.Errorf("%d overflows uint32", i64) - } - case reflect.Uint64: - if i64 < 0 { - return fmt.Errorf("%d overflows uint64", i64) - } - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return fmt.Errorf("%d overflows uint", i64) - } - default: - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - val.SetUint(uint64(i64)) - return nil -} - -func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var f float64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: 
- i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - f = float64(i32) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - f = float64(i64) - case bsontype.Double: - f, err = vr.ReadDouble() - if err != nil { - return emptyValue, err - } - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - f = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) - } - - switch t.Kind() { - case reflect.Float32: - if !dc.Truncate && float64(float32(f)) != f { - return emptyValue, errCannotTruncate - } - - return reflect.ValueOf(float32(f)), nil - case reflect.Float64: - return reflect.ValueOf(f), nil - default: - return emptyValue, ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: reflect.Zero(t), - } - } -} - -// FloatDecodeValue is the ValueDecoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: val, - } - } - - elem, err := dvd.floatDecodeType(ec, vr, val.Type()) - if err != nil { - return err - } - - val.SetFloat(elem.Float()) - return nil -} - -// StringDecodeValue is the ValueDecoderFunc for string types. -// -// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var str string - var err error - switch vr.Type() { - // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return err - } - default: - return fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - val.SetString(str) - return nil -} - -func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJavaScript { - return emptyValue, ValueDecoderError{ - Name: "JavaScriptDecodeValue", - Types: []reflect.Type{tJavaScript}, - Received: reflect.Zero(t), - } - } - - var js string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.JavaScript: - js, err = vr.ReadJavascript() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.JavaScript(js)), nil -} - -// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
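The int and float decoders above refuse to silently drop a fractional part: decoding a BSON double such as 3.5 into a Go integer fails with errCannotTruncate unless truncation is enabled. A sketch using the Decoder method that the deprecation notes name (AllowTruncatingDoubles):

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    type doc struct {
        N int `bson:"n"`
    }

    func main() {
        raw, _ := bson.Marshal(bson.M{"n": 3.5})

        var d doc
        // Default behavior: "float64 can only be truncated to a lower
        // precision type when truncation is enabled".
        fmt.Println(bson.Unmarshal(raw, &d))

        dec, _ := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
        dec.AllowTruncatingDoubles()
        if err := dec.Decode(&d); err != nil {
            panic(err)
        }
        fmt.Println(d.N) // 3
    }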
-func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJavaScript { - return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tSymbol { - return emptyValue, ValueDecoderError{ - Name: "SymbolDecodeValue", - Types: []reflect.Type{tSymbol}, - Received: reflect.Zero(t), - } - } - - var symbol string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - symbol, err = vr.ReadString() - case bsontype.Symbol: - symbol, err = vr.ReadSymbol() - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} - } - symbol = string(data) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Symbol(symbol)), nil -} - -// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tSymbol { - return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tBinary { - return emptyValue, ValueDecoderError{ - Name: "BinaryDecodeValue", - Types: []reflect.Type{tBinary}, - Received: reflect.Zero(t), - } - } - - var data []byte - var subtype byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Binary: - data, subtype, err = vr.ReadBinary() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil -} - -// BinaryDecodeValue is the ValueDecoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tBinary { - return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - - elem, err := dvd.binaryDecodeType(dc, vr, tBinary) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tUndefined { - return emptyValue, ValueDecoderError{ - Name: "UndefinedDecodeValue", - Types: []reflect.Type{tUndefined}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Undefined{}), nil -} - -// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tUndefined { - return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// Accept both 12-byte string and pretty-printed 24-byte hex string formats. -func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tOID { - return emptyValue, ValueDecoderError{ - Name: "ObjectIDDecodeValue", - Types: []reflect.Type{tOID}, - Received: reflect.Zero(t), - } - } - - var oid primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.ObjectID: - oid, err = vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - if oid, err = primitive.ObjectIDFromHex(str); err == nil { - break - } - if len(str) != 12 { - return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) - } - byteArr := []byte(str) - copy(oid[:], byteArr) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) - } - - return reflect.ValueOf(oid), nil -} - -// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
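objectIDDecodeType above accepts three shapes for an ObjectID: the native BSON type, a pretty-printed 24-character hex string, and a raw 12-byte string. A small sketch of the string fallback, assuming only the driver's real primitive package (oidFromString is an illustrative helper, not driver API):

package main

import (
    "fmt"

    "go.mongodb.org/mongo-driver/bson/primitive"
)

// oidFromString mirrors the bsontype.String branch: try hex decoding first,
// then fall back to copying a raw 12-byte string into the ObjectID.
func oidFromString(s string) (primitive.ObjectID, error) {
    if oid, err := primitive.ObjectIDFromHex(s); err == nil {
        return oid, nil
    }
    var oid primitive.ObjectID
    if len(s) != 12 {
        return oid, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %d)", len(s))
    }
    copy(oid[:], s)
    return oid, nil
}

func main() {
    pretty, _ := oidFromString("5f4d3c2b1a09f8e7d6c5b4a3") // 24-char hex form
    raw, _ := oidFromString("abcdefghijkl")                // raw 12-byte form
    fmt.Println(pretty.Hex(), raw.Hex())
}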
-func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tOID { - return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} - } - - elem, err := dvd.objectIDDecodeType(dc, vr, tOID) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDateTime { - return emptyValue, ValueDecoderError{ - Name: "DateTimeDecodeValue", - Types: []reflect.Type{tDateTime}, - Received: reflect.Zero(t), - } - } - - var dt int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err = vr.ReadDateTime() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DateTime(dt)), nil -} - -// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDateTime { - return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tNull { - return emptyValue, ValueDecoderError{ - Name: "NullDecodeValue", - Types: []reflect.Type{tNull}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Null{}), nil -} - -// NullDecodeValue is the ValueDecoderFunc for Null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
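dateTimeDecodeType above carries the raw BSON DateTime through as primitive.DateTime, i.e. milliseconds since the Unix epoch. A quick sketch of the conversion to and from time.Time, assuming the driver's primitive package and Go 1.17+ for time.UnixMilli:

package main

import (
    "fmt"
    "time"

    "go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
    // BSON DateTime is a millisecond count since the Unix epoch.
    var ms int64 = 1700000000123
    dt := primitive.DateTime(ms)

    // DateTime.Time performs the split that TimeDecodeValue (further below)
    // does by hand: time.Unix(ms/1000, ms%1000*1000000).
    fmt.Println(dt.Time().UTC()) // 2023-11-14 22:13:20.123 +0000 UTC

    // Round trip back from a time.Time.
    fmt.Println(primitive.NewDateTimeFromTime(time.UnixMilli(ms)) == dt) // true
}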
-func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tNull { - return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - elem, err := dvd.nullDecodeType(dc, vr, tNull) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tRegex { - return emptyValue, ValueDecoderError{ - Name: "RegexDecodeValue", - Types: []reflect.Type{tRegex}, - Received: reflect.Zero(t), - } - } - - var pattern, options string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Regex: - pattern, options, err = vr.ReadRegex() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil -} - -// RegexDecodeValue is the ValueDecoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tRegex { - return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - elem, err := dvd.regexDecodeType(dc, vr, tRegex) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDBPointer { - return emptyValue, ValueDecoderError{ - Name: "DBPointerDecodeValue", - Types: []reflect.Type{tDBPointer}, - Received: reflect.Zero(t), - } - } - - var ns string - var pointer primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DBPointer: - ns, pointer, err = vr.ReadDBPointer() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil -} - -// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDBPointer { - return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { - if reflectType != tTimestamp { - return emptyValue, ValueDecoderError{ - Name: "TimestampDecodeValue", - Types: []reflect.Type{tTimestamp}, - Received: reflect.Zero(reflectType), - } - } - - var t, incr uint32 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Timestamp: - t, incr, err = vr.ReadTimestamp() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil -} - -// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTimestamp { - return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMinKey { - return emptyValue, ValueDecoderError{ - Name: "MinKeyDecodeValue", - Types: []reflect.Type{tMinKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MinKey: - err = vr.ReadMinKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MinKey{}), nil -} - -// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMinKey { - return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMaxKey { - return emptyValue, ValueDecoderError{ - Name: "MaxKeyDecodeValue", - Types: []reflect.Type{tMaxKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MaxKey: - err = vr.ReadMaxKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MaxKey{}), nil -} - -// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMaxKey { - return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDecimal { - return emptyValue, ValueDecoderError{ - Name: "Decimal128DecodeValue", - Types: []reflect.Type{tDecimal}, - Received: reflect.Zero(t), - } - } - - var d128 primitive.Decimal128 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Decimal128: - d128, err = vr.ReadDecimal128() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(d128), nil -} - -// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDecimal { - return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - - elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJSONNumber { - return emptyValue, ValueDecoderError{ - Name: "JSONNumberDecodeValue", - Types: []reflect.Type{tJSONNumber}, - Received: reflect.Zero(t), - } - } - - var jsonNum json.Number - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(i64, 10)) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(jsonNum), nil -} - -// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJSONNumber { - return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - - elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tURL { - return emptyValue, ValueDecoderError{ - Name: "URLDecodeValue", - Types: []reflect.Type{tURL}, - Received: reflect.Zero(t), - } - } - - urlPtr := &url.URL{} - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - var str string // Declare str here to avoid shadowing err during the ReadString call. - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - - urlPtr, err = url.Parse(str) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(urlPtr).Elem(), nil -} - -// URLDecodeValue is the ValueDecoderFunc for url.URL. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
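urlDecodeType above treats a BSON string as the serialized form of a url.URL, and the encoder side of this patch writes u.String() symmetrically. A tiny standard-library round trip showing the invariant this relies on:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // Encoding stores the URL as its canonical string form...
    u, err := url.Parse("https://example.com/path?x=1")
    if err != nil {
        panic(err)
    }
    stored := u.String()

    // ...and decoding parses that string back, mirroring the
    // bsontype.String branch of urlDecodeType.
    back, err := url.Parse(stored)
    if err != nil {
        panic(err)
    }
    fmt.Println(back.Host, back.Query().Get("x")) // example.com 1
}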
-func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tURL { - return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} - } - - elem, err := dvd.urlDecodeType(dc, vr, tURL) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// TimeDecodeValue is the ValueDecoderFunc for time.Time. -// -// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.DateTime { - return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) - } - - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) - return nil -} - -// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. -// -// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { - return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) - } - - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != 0x00 { - return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) - } - - val.Set(reflect.ValueOf(data)) - return nil -} - -// MapDecodeValue is the ValueDecoderFunc for map[string]* types. -// -// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - elem := reflect.New(eType).Elem() - - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) - } - return nil -} - -// ArrayDecodeValue is the ValueDecoderFunc for array types. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if len(data) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) - } - - for idx, elem := range data { - val.Index(idx).Set(reflect.ValueOf(elem)) - } - return nil - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into an array", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if len(elems) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) - } - - for idx, elem := range elems { - val.Index(idx).Set(elem) - } - - return nil -} - -// SliceDecodeValue is the ValueDecoderFunc for slice types. -// -// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vr.Type() { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - default: - return fmt.Errorf("cannot decode %v into a slice", vr.Type()) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} - -// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - if !val.Type().Implements(tValueUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - m, ok := val.Interface().(ValueUnmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - return m.UnmarshalBSONValue(t, src) -} - -// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - // If the target Go value is a pointer and the BSON field value is empty, set the value to the - // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to - // change the pointer value from within the function (only the value at the pointer address), - // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON - // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches - // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and - // the JSON field value is "null". - if val.Kind() == reflect.Ptr && len(src) == 0 { - val.Set(reflect.Zero(val.Type())) - return nil - } - - if !val.Type().Implements(tUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
- } - - m, ok := val.Interface().(Unmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - return m.UnmarshalBSON(src) -} - -// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - rtype, err := dc.LookupTypeMapEntry(vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.EmbeddedDocument: - if dc.Ancestor != nil { - rtype = dc.Ancestor - break - } - rtype = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return err - } - - elem := reflect.New(rtype).Elem() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreDocument { - return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - - cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) - val.Set(reflect.ValueOf(cdoc)) - return err -} - -func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { - elems := make([]reflect.Value, 0) - - ar, err := vr.ReadArray() - if err != nil { - return nil, err - } - - eType := val.Type().Elem() - - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return nil, err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - idx := 0 - for { - vr, err := ar.ReadValue() - if errors.Is(err, bsonrw.ErrEOA) { - break - } - if err != nil { - return nil, err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return nil, newDecodeError(strconv.Itoa(idx), err) - } - elems = append(elems, elem) - idx++ - } - - return elems, nil -} - -func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { - var cws primitive.CodeWithScope - - code, dr, err := vr.ReadCodeWithScope() - if err != nil { - return cws, err - } - - scope := reflect.New(tD).Elem() - elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) - if err != nil { - return cws, err - } - - scope.Set(reflect.MakeSlice(tD, 0, len(elems))) - scope.Set(reflect.Append(scope, elems...)) - - cws = primitive.CodeWithScope{ - Code: primitive.JavaScript(code), - Scope: scope.Interface().(primitive.D), - } - return cws, nil -} - -func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t 
!= tCodeWithScope { - return emptyValue, ValueDecoderError{ - Name: "CodeWithScopeDecodeValue", - Types: []reflect.Type{tCodeWithScope}, - Received: reflect.Zero(t), - } - } - - var cws primitive.CodeWithScope - var err error - switch vrType := vr.Type(); vrType { - case bsontype.CodeWithScope: - cws, err = dvd.readCodeWithScope(dc, vr) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(cws), nil -} - -// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCodeWithScope { - return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - default: - return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return nil, err - } - - return dvd.decodeElemsFromDocumentReader(dc, dr) -} - -func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return nil, err - } - - elems := make([]reflect.Value, 0) - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return nil, err - } - - val := reflect.New(tEmpty).Elem() - err = decoder.DecodeValue(dc, vr, val) - if err != nil { - return nil, newDecodeError(key, err) - } - - elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) - } - - return elems, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go deleted file mode 100644 index 4751ae99..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ /dev/null @@ -1,856 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var defaultValueEncoders DefaultValueEncoders - -var bvwPool = bsonrw.NewBSONValueWriterPool() - -var errInvalidValue = errors.New("cannot encode invalid element") - -var sliceWriterPool = sync.Pool{ - New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0) - return &sw - }, -} - -func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { - vw, err := dw.WriteDocumentElement(e.Key) - if err != nil { - return err - } - - if e.Value == nil { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) - if err != nil { - return err - } - return nil -} - -// DefaultValueEncoders is a namespace type for the default ValueEncoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -type DefaultValueEncoders struct{} - -// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with -// the provided RegistryBuilder. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) - } - rb. - RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeEncoder(tTime, defaultTimeCodec). - RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeEncoder(tCoreArray, defaultArrayCodec). - RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). - RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). - RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). - RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). - RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). - RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). - RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). - RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). - RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). - RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). - RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). - RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). - RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). - RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). - RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). - RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). - RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). - RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). - RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). 
- RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). - RegisterDefaultEncoder(reflect.Map, defaultMapCodec). - RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultEncoder(reflect.String, defaultStringCodec). - RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). - RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). - RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). - RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) -} - -// BooleanEncodeValue is the ValueEncoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Bool { - return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - return vw.WriteBoolean(val.Bool()) -} - -func fitsIn32Bits(i int64) bool { - return math.MinInt32 <= i && i <= math.MaxInt32 -} - -// IntEncodeValue is the ValueEncoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32: - return vw.WriteInt32(int32(val.Int())) - case reflect.Int: - i64 := val.Int() - if fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - case reflect.Int64: - i64 := val.Int() - if ec.MinSize && fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - } - - return ValueEncoderError{ - Name: "IntEncodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } -} - -// UintEncodeValue is the ValueEncoderFunc for uint types. -// -// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. 
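fitsIn32Bits and IntEncodeValue above encode plain ints as int32 whenever the value fits, while int64 shrinks only when MinSize is set. A compact sketch of that wire-type choice (chooseWireType is an illustrative helper, not driver API):

package main

import (
    "fmt"
    "math"
)

// fitsIn32Bits is the same range test IntEncodeValue uses.
func fitsIn32Bits(i int64) bool {
    return math.MinInt32 <= i && i <= math.MaxInt32
}

// chooseWireType mirrors IntEncodeValue's rule: a plain int becomes int32
// whenever it fits, while an int64 becomes int32 only when the caller set
// MinSize (int8/int16/int32 always go out as int32).
func chooseWireType(i int64, isInt64, minSize bool) string {
    if fitsIn32Bits(i) && (!isInt64 || minSize) {
        return "int32"
    }
    return "int64"
}

func main() {
    fmt.Println(chooseWireType(42, false, false))          // int32: plain int fits
    fmt.Println(chooseWireType(42, true, false))           // int64: no MinSize
    fmt.Println(chooseWireType(42, true, true))            // int32: MinSize opts in
    fmt.Println(chooseWireType(math.MaxInt64, true, true)) // int64: doesn't fit
}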
-func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - if ec.MinSize && u64 <= math.MaxInt32 { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -// FloatEncodeValue is the ValueEncoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Float32, reflect.Float64: - return vw.WriteDouble(val.Float()) - } - - return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} -} - -// StringEncodeValue is the ValueEncoderFunc for string types. -// -// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tOID { - return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} - } - return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) -} - -// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDecimal { - return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) -} - -// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
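JSONNumberEncodeValue below prefers the integer representation and falls back to float64 only when Int64() fails. The same ordering with the standard library alone (classify is an illustrative helper):

package main

import (
    "encoding/json"
    "fmt"
)

// classify mirrors JSONNumberEncodeValue's ordering: try Int64 first and
// fall back to Float64 only when the integer parse fails.
func classify(n json.Number) string {
    if i, err := n.Int64(); err == nil {
        return fmt.Sprintf("int64(%d)", i)
    }
    f, err := n.Float64()
    if err != nil {
        return "unparseable"
    }
    return fmt.Sprintf("float64(%g)", f)
}

func main() {
    fmt.Println(classify(json.Number("42")))   // int64(42)
    fmt.Println(classify(json.Number("3.14"))) // float64(3.14)
}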
-func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJSONNumber { - return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - jsnum := val.Interface().(json.Number) - - // Attempt int first, then float64 - if i64, err := jsnum.Int64(); err == nil { - return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) - } - - f64, err := jsnum.Float64() - if err != nil { - return err - } - - return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64)) -} - -// URLEncodeValue is the ValueEncoderFunc for url.URL. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tURL { - return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} - } - u := val.Interface().(url.URL) - return vw.WriteString(u.String()) - } - -// TimeEncodeValue is the ValueEncoderFunc for time.Time. -// -// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. -func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} - -// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. -// -// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -// MapEncodeValue is the ValueEncoderFunc for map[string]* types. -// -// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead. -func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() { - // If we have a nil map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. - err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return dve.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists; this is mainly used for inline maps in the -// struct codec. 
-func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - if collisionFn != nil && collisionFn(key.String()) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(key.String()) - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// ArrayEncodeValue is the ValueEncoderFunc for array types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().Elem() == tE { - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - e := val.Index(idx).Interface().(primitive.E) - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// SliceEncodeValue is the ValueEncoderFunc for slice types. -// -// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { - if origEncoder != nil || (currVal.Kind() != reflect.Interface) { - return origEncoder, currVal, nil - } - currVal = currVal.Elem() - if !currVal.IsValid() { - return nil, currVal, errInvalidValue - } - currEncoder, err := ec.LookupEncoder(currVal.Type()) - - return currEncoder, currVal, err -} - -// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement ValueMarshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - case val.Type().Implements(tValueMarshaler): - // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tValueMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - } - - m, ok := val.Interface().(ValueMarshaler) - if !ok { - return vw.WriteNull() - } - t, data, err := m.MarshalBSONValue() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) -} - -// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Marshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - case val.Type().Implements(tMarshaler): - // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - } - - m, ok := val.Interface().(Marshaler) - if !ok { - return vw.WriteNull() - } - data, err := m.MarshalBSON() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) -} - -// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Proxy - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - case val.Type().Implements(tProxy): - // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tProxy) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - } - - m, ok := val.Interface().(Proxy) - if !ok { - return vw.WriteNull() - } - v, err := m.ProxyBSON() - if err != nil { - return err - } - if v == nil { - encoder, err := ec.LookupEncoder(nil) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) - } - vv := reflect.ValueOf(v) - switch vv.Kind() { - case reflect.Ptr, reflect.Interface: - vv = vv.Elem() - } - encoder, err := ec.LookupEncoder(vv.Type()) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, vv) -} - -// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJavaScript { - return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - return vw.WriteJavascript(val.String()) -} - -// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
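// Editor's sketch (not part of the patch): how the ValueMarshaler hook above is
// typically exercised, assuming the v1 mongo-driver API. The Celsius type and
// its conversion rule are invented for illustration.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// Celsius marshals itself as a BSON double via MarshalBSONValue, so the
// ValueMarshaler encoder path is taken instead of the default integer encoder.
type Celsius int

func (c Celsius) MarshalBSONValue() (bsontype.Type, []byte, error) {
	return bson.MarshalValue(float64(c))
}

func main() {
	doc, err := bson.Marshal(bson.M{"temp": Celsius(21)})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(doc)) // roughly {"temp": {"$numberDouble":"21.0"}}
}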
-func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tSymbol { - return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - return vw.WriteSymbol(val.String()) -} - -// BinaryEncodeValue is the ValueEncoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tBinary { - return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - b := val.Interface().(primitive.Binary) - - return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) -} - -// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tUndefined { - return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - return vw.WriteUndefined() -} - -// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDateTime { - return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - return vw.WriteDateTime(val.Int()) -} - -// NullEncodeValue is the ValueEncoderFunc for Null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tNull { - return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - return vw.WriteNull() -} - -// RegexEncodeValue is the ValueEncoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tRegex { - return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - regex := val.Interface().(primitive.Regex) - - return vw.WriteRegex(regex.Pattern, regex.Options) -} - -// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
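// Editor's sketch (not part of the patch): marshaling a few of the primitive
// types whose encoders are deleted above. These encoders sit on the default
// registry, so plain bson.Marshal uses them; the field names are invented.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	doc := bson.D{
		{Key: "bin", Value: primitive.Binary{Subtype: 0x00, Data: []byte{1, 2, 3}}},
		{Key: "re", Value: primitive.Regex{Pattern: "^a", Options: "i"}},
		{Key: "ts", Value: primitive.Timestamp{T: 42, I: 1}},
	}
	raw, err := bson.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw))
}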
-func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDBPointer { - return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - dbp := val.Interface().(primitive.DBPointer) - - return vw.WriteDBPointer(dbp.DB, dbp.Pointer) -} - -// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTimestamp { - return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - ts := val.Interface().(primitive.Timestamp) - - return vw.WriteTimestamp(ts.T, ts.I) -} - -// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMinKey { - return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - return vw.WriteMinKey() -} - -// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMaxKey { - return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - return vw.WriteMaxKey() -} - -// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreDocument { - return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - cdoc := val.Interface().(bsoncore.Document) - - return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) -} - -// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCodeWithScope { - return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - cws := val.Interface().(primitive.CodeWithScope) - - dw, err := vw.WriteCodeWithScope(string(cws.Code)) - if err != nil { - return err - } - - sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) - defer sliceWriterPool.Put(sw) - *sw = (*sw)[:0] - - scopeVW := bvwPool.Get(sw) - defer bvwPool.Put(scopeVW) - - encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) - if err != nil { - return err - } - - err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) - if err != nil { - return err - } - return dw.WriteDocumentEnd() -} - -// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type -func isImplementationNil(val reflect.Value, inter reflect.Type) bool { - vt := val.Type() - for vt.Kind() == reflect.Ptr { - vt = vt.Elem() - } - return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go deleted file mode 100644 index 4613e5a1..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsoncodec provides a system for encoding values to BSON representations and decoding -// values from BSON representations. This package considers both binary BSON and ExtendedJSON as -// BSON representations. The types in this package enable a flexible system for handling this -// encoding and decoding. -// -// The codec system is composed of two parts: -// -// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON -// representations. -// -// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for -// retrieving them. -// -// # ValueEncoders and ValueDecoders -// -// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. -// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the -// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc -// is provided to allow use of a function with the correct signature as a ValueEncoder. An -// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and -// to provide configuration information. -// -// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that -// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to -// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext -// instance is provided and serves similar functionality to the EncodeContext. -// -// # Registry -// -// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. 
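// Editor's sketch (not part of the patch): the ValueEncoderFunc convenience
// described in the package comment above, wired into a fresh Registry. A
// minimal sketch against the v1 driver API; the Secret type and its redaction
// rule are invented for illustration.
package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type Secret string

// redact writes every Secret as the fixed string "***" instead of its contents.
func redact(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	return vw.WriteString("***")
}

func main() {
	reg := bson.NewRegistry()
	reg.RegisterTypeEncoder(reflect.TypeOf(Secret("")), bsoncodec.ValueEncoderFunc(redact))

	raw, err := bson.MarshalWithRegistry(reg, bson.M{"password": Secret("hunter2")})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw)) // {"password": "***"}
}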
See the Registry type
-// documentation for examples of registering various custom encoders and decoders. A Registry can
-// have four main types of codecs:
-//
-// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
-// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
-// whose type matches the registered type exactly.
-// If the registered type is an interface, the codec will be invoked when encoding or decoding
-// values whose type is the interface, but not for values with concrete types that implement the
-// interface.
-//
-// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
-// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
-// will be invoked when encoding or decoding values whose types implement the interface. An example
-// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
-// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
-//
-// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type
-// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
-// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
-// respectively, when decoding into a bson.D. The following code would change the behavior so these
-// values decode as Go int instances instead:
-//
-//	intType := reflect.TypeOf(int(0))
-//	registry.RegisterTypeMapEntry(bsontype.Int32, intType)
-//	registry.RegisterTypeMapEntry(bsontype.Int64, intType)
-//
-// 4. Kind encoders/decoders - These can be registered using the RegisterDefaultEncoder and
-// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
-// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
-// match a registered type or hook encoder/decoder first. These methods should be used to change the
-// behavior for all values for a specific kind.
-//
-// # Registry Lookup Procedure
-//
-// When looking up an encoder in a Registry, the precedence rules are as follows:
-//
-// 1. A type encoder registered for the exact type of the value.
-//
-// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
-// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
-// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
-// constructed using bson.NewRegistry have driver-defined hooks registered for the
-// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
-// precedence over any new hooks.
-//
-// 3. A kind encoder registered for the value's kind.
-//
-// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
-// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
-// will be returned if no decoder is found.
-//
-// # DefaultValueEncoders and DefaultValueDecoders
-//
-// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
-// ValueDecoders for handling a wide range of Go types, including all of the types within the
-// primitive package. To make registering these codecs easier, a helper method on each type is
-// provided.
For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for -// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also -// handles registering type map entries for each BSON type. -package bsoncodec diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go deleted file mode 100644 index 098368f0..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// EmptyInterfaceCodec is the Codec used for interface{} values. -// -// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go -// Driver 2.0. To configure the empty interface encode and decode behavior, use -// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface -// encode and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to unmarshal BSON binary field -// values as a Go byte slice, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// BinaryAsSlice: true, -// }) -// -// See the deprecation notice for each field in EmptyInterfaceCodec for the -// corresponding settings. -type EmptyInterfaceCodec struct { - // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the - // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. - // - // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. - DecodeBinaryAsSlice bool -} - -var ( - defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() - - // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it - // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultEmptyInterfaceCodec -) - -// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. -// -// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See -// [EmptyInterfaceCodec] for more details. -func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { - interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) - - codec := EmptyInterfaceCodec{} - if interfaceOpt.DecodeBinaryAsSlice != nil { - codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice - } - return &codec -} - -// EncodeValue is the ValueEncoderFunc for interface{}. 
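// Editor's sketch (not part of the patch): the type-map lookup that
// getEmptyInterfaceDecodeType below performs, exercised end to end. With the
// extra entries, numeric BSON values decoded into interface{} land as Go int.
package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	reg := bson.NewRegistry()
	intType := reflect.TypeOf(int(0))
	reg.RegisterTypeMapEntry(bsontype.Int32, intType)
	reg.RegisterTypeMapEntry(bsontype.Int64, intType)

	raw, err := bson.Marshal(bson.M{"n": int32(7)})
	if err != nil {
		panic(err)
	}

	var out bson.M
	if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["n"]) // int, not int32
}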
-func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { - isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument { - if dc.defaultDocumentType != nil { - // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return - // that type. - return dc.defaultDocumentType, nil - } - if dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil - } - } - - rtype, err := dc.LookupTypeMapEntry(valueType) - if err == nil { - return rtype, nil - } - - if isDocument { - // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, - // depending on the original valueType. - var lookupType bsontype.Type - switch valueType { - case bsontype.Type(0): - lookupType = bsontype.EmbeddedDocument - case bsontype.EmbeddedDocument: - lookupType = bsontype.Type(0) - } - - rtype, err = dc.LookupTypeMapEntry(lookupType) - if err == nil { - return rtype, nil - } - } - - return nil, err -} - -func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tEmpty { - return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} - } - - rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.Null: - return reflect.Zero(t), vr.ReadNull() - default: - return emptyValue, err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return emptyValue, err - } - - elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) - if err != nil { - return emptyValue, err - } - - if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { - binElem := elem.Interface().(primitive.Binary) - if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { - elem = reflect.ValueOf(binElem.Data) - } - } - - return elem, nil -} - -// DecodeValue is the ValueDecoderFunc for interface{}. -func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - elem, err := eic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go deleted file mode 100644 index d7e00ffa..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var defaultMapCodec = NewMapCodec() - -// MapCodec is the Codec used for map values. -// -// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To -// configure the map encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON -// documents, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// NilMapAsEmpty: true, -// }) -// -// See the deprecation notice for each field in MapCodec for the corresponding -// settings. -type MapCodec struct { - // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination - // value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. - DecodeZerosMap bool - - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of - // BSON null. - // - // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. - EncodeNilAsEmpty bool - - // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name - // strings using fmt.Sprintf() instead of the default string conversion logic. - // - // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or - // options.BSONOptions.StringifyMapKeysWithFmt instead. - EncodeKeysWithStringer bool -} - -// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. -// This applies to types used as map keys and is similar to encoding.TextMarshaler. -type KeyMarshaler interface { - MarshalKey() (key string, err error) -} - -// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation -// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. -// -// UnmarshalKey must be able to decode the form generated by MarshalKey. -// UnmarshalKey must copy the text if it wishes to retain the text -// after returning. -type KeyUnmarshaler interface { - UnmarshalKey(key string) error -} - -// NewMapCodec returns a MapCodec with options opts. -// -// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See -// [MapCodec] for more details. -func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { - mapOpt := bsonoptions.MergeMapCodecOptions(opts...) - - codec := MapCodec{} - if mapOpt.DecodeZerosMap != nil { - codec.DecodeZerosMap = *mapOpt.DecodeZerosMap - } - if mapOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty - } - if mapOpt.EncodeKeysWithStringer != nil { - codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer - } - return &codec -} - -// EncodeValue is the ValueEncoder for map[*]* types. 
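// Editor's sketch (not part of the patch): a map key type satisfying the
// KeyMarshaler/KeyUnmarshaler interfaces defined above. The Point type and its
// "x,y" key format are invented for illustration.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type Point struct{ X, Y int }

// MarshalKey turns a Point into the document field name "x,y".
func (p Point) MarshalKey() (string, error) {
	return fmt.Sprintf("%d,%d", p.X, p.Y), nil
}

// UnmarshalKey parses the form produced by MarshalKey.
func (p *Point) UnmarshalKey(key string) error {
	_, err := fmt.Sscanf(key, "%d,%d", &p.X, &p.Y)
	return err
}

func main() {
	in := map[Point]string{{X: 1, Y: 2}: "corner"}
	raw, err := bson.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := map[Point]string{}
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[{1 2}:corner]
}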
-func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { - // If we have a nil map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. - err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return mc.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. -func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) - if err != nil { - return err - } - - if collisionFn != nil && collisionFn(keyStr) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(keyStr) - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// DecodeValue is the ValueDecoder for map[string/decimal]* types. 
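// Editor's sketch (not part of the patch): the nil-map special case in
// EncodeValue above, driven through the Encoder option named in the
// deprecation notes. A nil map becomes {} with NilMapAsEmpty, null otherwise.
package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type wrapper struct {
	M map[string]int `bson:"m"`
}

func main() {
	var buf bytes.Buffer
	vw, err := bsonrw.NewBSONValueWriter(&buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	enc.NilMapAsEmpty()
	if err := enc.Encode(wrapper{}); err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(buf.Bytes())) // {"m": {}} rather than {"m": null}
}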
-func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { - clearMap(val) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - k, err := mc.decodeKey(key, keyType) - if err != nil { - return err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return newDecodeError(key, err) - } - - val.SetMapIndex(k, elem) - } - return nil -} - -func clearMap(m reflect.Value) { - var none reflect.Value - for _, k := range m.MapKeys() { - m.SetMapIndex(k, none) - } -} - -func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { - if mc.EncodeKeysWithStringer || encodeKeysWithStringer { - return fmt.Sprint(val), nil - } - - // keys of any string type are used directly - if val.Kind() == reflect.String { - return val.String(), nil - } - // KeyMarshalers are marshaled - if km, ok := val.Interface().(KeyMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - buf, err := km.MarshalKey() - if err == nil { - return buf, nil - } - return "", err - } - // keys implement encoding.TextMarshaler are marshaled. - if km, ok := val.Interface().(encoding.TextMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - - buf, err := km.MarshalText() - if err != nil { - return "", err - } - - return string(buf), nil - } - - switch val.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(val.Int(), 10), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(val.Uint(), 10), nil - } - return "", fmt.Errorf("unsupported key type: %v", val.Type()) -} - -var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - -func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { - keyVal := reflect.ValueOf(key) - var err error - switch { - // First, if EncodeKeysWithStringer is not enabled, try to decode withKeyUnmarshaler - case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(KeyUnmarshaler) - err = v.UnmarshalKey(key) - keyVal = keyVal.Elem() - // Try to decode encoding.TextUnmarshalers. 
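// Editor's note (not part of the patch): decodeKey mirrors encodeKey's priority
// order: a KeyUnmarshaler hook wins first (skipped when keys were stringified
// with fmt), then the encoding.TextUnmarshaler case below, and finally the
// kind-based parsing in the default branch.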
- case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(encoding.TextUnmarshaler) - err = v.UnmarshalText([]byte(key)) - keyVal = keyVal.Elem() - // Otherwise, go to type specific behavior - default: - switch keyType.Kind() { - case reflect.String: - keyVal = reflect.ValueOf(key).Convert(keyType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, parseErr := strconv.ParseInt(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, parseErr := strconv.ParseUint(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - break - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Float32, reflect.Float64: - if mc.EncodeKeysWithStringer { - parsed, err := strconv.ParseFloat(key, 64) - if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) - } - keyVal = reflect.ValueOf(parsed) - break - } - fallthrough - default: - return keyVal, fmt.Errorf("unsupported key type: %v", keyType) - } - } - return keyVal, err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go deleted file mode 100644 index fbd9f0a9..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import "fmt" - -type mode int - -const ( - _ mode = iota - mTopLevel - mDocument - mArray - mValue - mElement - mCodeWithScope - mSpacer -) - -func (m mode) String() string { - var str string - - switch m { - case mTopLevel: - str = "TopLevel" - case mDocument: - str = "DocumentMode" - case mArray: - str = "ArrayMode" - case mValue: - str = "ValueMode" - case mElement: - str = "ElementMode" - case mCodeWithScope: - str = "CodeWithScopeMode" - case mSpacer: - str = "CodeWithScopeSpacerFrame" - default: - str = "UnknownMode" - } - - return str -} - -// TransitionError is an error returned when an invalid progressing a -// ValueReader or ValueWriter state machine occurs. -type TransitionError struct { - parent mode - current mode - destination mode -} - -func (te TransitionError) Error() string { - if te.destination == mode(0) { - return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current) - } - if te.parent == mode(0) { - return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination) - } - return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go deleted file mode 100644 index ddfa4a33..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var _ ValueEncoder = &PointerCodec{} -var _ ValueDecoder = &PointerCodec{} - -// PointerCodec is the Codec used for pointers. -// -// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To -// override the default pointer encode and decode behavior, create a new registry -// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new -// encoder and decoder for pointers. -// -// For example, -// -// reg := bson.NewRegistry() -// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) -// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) -type PointerCodec struct { - ecache typeEncoderCache - dcache typeDecoderCache -} - -// NewPointerCodec returns a PointerCodec that has been initialized. -// -// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See -// [PointerCodec] for more details. -func NewPointerCodec() *PointerCodec { - return &PointerCodec{} -} - -// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil -// or looking up an encoder for the type of value the pointer points to. -func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.Ptr { - if !val.IsValid() { - return vw.WriteNull() - } - return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - typ := val.Type() - if v, ok := pc.ecache.Load(typ); ok { - if v == nil { - return ErrNoEncoder{Type: typ} - } - return v.EncodeValue(ec, vw, val.Elem()) - } - // TODO(charlie): handle concurrent requests for the same type - enc, err := ec.LookupEncoder(typ.Elem()) - enc = pc.ecache.LoadOrStore(typ, enc) - if err != nil { - return err - } - return enc.EncodeValue(ec, vw, val.Elem()) -} - -// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and -// using that to decode. If the BSON value is Null, this method will set the pointer to nil. 
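// Editor's sketch (not part of the patch): the pointer behavior documented
// above. A nil *int field marshals to BSON null, and null decodes back into a
// nil pointer; the field name is invented.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type doc struct {
	N *int `bson:"n"`
}

func main() {
	raw, err := bson.Marshal(doc{}) // N is nil, so this writes {"n": null}
	if err != nil {
		panic(err)
	}

	var out doc
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.N == nil) // true
}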
-func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if !val.CanSet() || val.Kind() != reflect.Ptr {
-		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
-	}
-
-	typ := val.Type()
-	if vr.Type() == bsontype.Null {
-		val.Set(reflect.Zero(typ))
-		return vr.ReadNull()
-	}
-	if vr.Type() == bsontype.Undefined {
-		val.Set(reflect.Zero(typ))
-		return vr.ReadUndefined()
-	}
-
-	if val.IsNil() {
-		val.Set(reflect.New(typ.Elem()))
-	}
-
-	if v, ok := pc.dcache.Load(typ); ok {
-		if v == nil {
-			return ErrNoDecoder{Type: typ}
-		}
-		return v.DecodeValue(dc, vr, val.Elem())
-	}
-	// TODO(charlie): handle concurrent requests for the same type
-	dec, err := dc.LookupDecoder(typ.Elem())
-	dec = pc.dcache.LoadOrStore(typ, dec)
-	if err != nil {
-		return err
-	}
-	return dec.DecodeValue(dc, vr, val.Elem())
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
deleted file mode 100644
index 4cf2b01a..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
-// that implement this interface will have ProxyBSON called during the encoding process and that
-// value will be encoded in place for the implementer.
-type Proxy interface {
-	ProxyBSON() (interface{}, error)
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
deleted file mode 100644
index 196c491b..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sync"
-
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
-//
-// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
-var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
-
-// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
-//
-// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
-var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
-
-// ErrNoEncoder is returned when there wasn't an encoder available for a type.
-//
-// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
-type ErrNoEncoder struct {
-	Type reflect.Type
-}
-
-func (ene ErrNoEncoder) Error() string {
-	if ene.Type == nil {
-		return "no encoder found for <nil>"
-	}
-	return "no encoder found for " + ene.Type.String()
-}
-
-// ErrNoDecoder is returned when there wasn't a decoder available for a type.
-//
-// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
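// Editor's sketch (not part of the patch): a Proxy implementation as described
// in proxy.go above. The default registry's Proxy hook substitutes the value
// returned by ProxyBSON; the Temp type and its surrogate form are invented.
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// Temp has only unexported state, so it proxies itself as a plain document.
type Temp struct{ celsius float64 }

func (t Temp) ProxyBSON() (interface{}, error) {
	return bson.D{{Key: "unit", Value: "C"}, {Key: "value", Value: t.celsius}}, nil
}

func main() {
	raw, err := bson.Marshal(bson.M{"reading": Temp{celsius: 21.5}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw)) // {"reading": {"unit": "C", "value": 21.5}}
}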
-type ErrNoDecoder struct { - Type reflect.Type -} - -func (end ErrNoDecoder) Error() string { - return "no decoder found for " + end.Type.String() -} - -// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. -// -// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0. -type ErrNoTypeMapEntry struct { - Type bsontype.Type -} - -func (entme ErrNoTypeMapEntry) Error() string { - return "no type map entry found for " + entme.Type.String() -} - -// ErrNotInterface is returned when the provided type is not an interface. -// -// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0. -var ErrNotInterface = errors.New("The provided type is not an interface") - -// A RegistryBuilder is used to build a Registry. This type is not goroutine -// safe. -// -// Deprecated: Use Registry instead. -type RegistryBuilder struct { - registry *Registry -} - -// NewRegistryBuilder creates a new empty RegistryBuilder. -// -// Deprecated: Use NewRegistry instead. -func NewRegistryBuilder() *RegistryBuilder { - return &RegistryBuilder{ - registry: NewRegistry(), - } -} - -// RegisterCodec will register the provided ValueCodec for the provided type. -// -// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead. -func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { - rb.RegisterTypeEncoder(t, codec) - rb.RegisterTypeDecoder(t, codec) - return rb -} - -// RegisterTypeEncoder will register the provided ValueEncoder for the provided type. -// -// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It -// will not be called when marshaling a non-interface type that implements the interface. -// -// Deprecated: Use Registry.RegisterTypeEncoder instead. -func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.registry.RegisterTypeEncoder(t, enc) - return rb -} - -// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. -// -// Deprecated: Use Registry.RegisterInterfaceEncoder instead. -func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.registry.RegisterInterfaceEncoder(t, enc) - return rb -} - -// RegisterTypeDecoder will register the provided ValueDecoder for the provided type. -// -// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered -// for a pointer to that type. -// -// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface. -// It will not be called when unmarshaling into a non-interface type that implements the interface. -// -// Deprecated: Use Registry.RegisterTypeDecoder instead. -func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterTypeDecoder(t, dec) - return rb -} - -// RegisterHookDecoder will register an decoder for the provided interface type t. 
This decoder will be called when -// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. -// -// Deprecated: Use Registry.RegisterInterfaceDecoder instead. -func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterInterfaceDecoder(t, dec) - return rb -} - -// RegisterEncoder registers the provided type and encoder pair. -// -// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. -func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t == tEmpty { - rb.registry.RegisterTypeEncoder(t, enc) - return rb - } - switch t.Kind() { - case reflect.Interface: - rb.registry.RegisterInterfaceEncoder(t, enc) - default: - rb.registry.RegisterTypeEncoder(t, enc) - } - return rb -} - -// RegisterDecoder registers the provided type and decoder pair. -// -// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. -func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t == nil { - rb.registry.RegisterTypeDecoder(t, dec) - return rb - } - if t == tEmpty { - rb.registry.RegisterTypeDecoder(t, dec) - return rb - } - switch t.Kind() { - case reflect.Interface: - rb.registry.RegisterInterfaceDecoder(t, dec) - default: - rb.registry.RegisterTypeDecoder(t, dec) - } - return rb -} - -// RegisterDefaultEncoder will register the provided ValueEncoder to the provided -// kind. -// -// Deprecated: Use Registry.RegisterKindEncoder instead. -func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.registry.RegisterKindEncoder(kind, enc) - return rb -} - -// RegisterDefaultDecoder will register the provided ValueDecoder to the -// provided kind. -// -// Deprecated: Use Registry.RegisterKindDecoder instead. -func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterKindDecoder(kind, dec) - return rb -} - -// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this -// mapping is decoding situations where an empty interface is used and a default type needs to be -// created and decoded into. -// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -// -// Deprecated: Use Registry.RegisterTypeMapEntry instead. -func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.registry.RegisterTypeMapEntry(bt, rt) - return rb -} - -// Build creates a Registry from the current state of this RegistryBuilder. -// -// Deprecated: Use NewRegistry instead. 
-func (rb *RegistryBuilder) Build() *Registry { - r := &Registry{ - interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), - interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), - typeEncoders: rb.registry.typeEncoders.Clone(), - typeDecoders: rb.registry.typeDecoders.Clone(), - kindEncoders: rb.registry.kindEncoders.Clone(), - kindDecoders: rb.registry.kindDecoders.Clone(), - } - rb.registry.typeMap.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - r.typeMap.Store(k, v) - } - return true - }) - return r -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// typed passed around and Encoders and Decoders are constructed from it. -type Registry struct { - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - typeEncoders *typeEncoderCache - typeDecoders *typeDecoderCache - kindEncoders *kindEncoderCache - kindDecoders *kindDecoderCache - typeMap sync.Map // map[bsontype.Type]reflect.Type -} - -// NewRegistry creates a new empty Registry. -func NewRegistry() *Registry { - return &Registry{ - typeEncoders: new(typeEncoderCache), - typeDecoders: new(typeDecoderCache), - kindEncoders: new(kindEncoderCache), - kindDecoders: new(kindDecoderCache), - } -} - -// RegisterTypeEncoder registers the provided ValueEncoder for the provided type. -// -// The type will be used as provided, so an encoder can be registered for a type and a different -// encoder can be registered for a pointer to that type. -// -// If the given type is an interface, the encoder will be called when marshaling a type that is -// that interface. It will not be called when marshaling a non-interface type that implements the -// interface. To get the latter behavior, call RegisterHookEncoder instead. -// -// RegisterTypeEncoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { - r.typeEncoders.Store(valueType, enc) -} - -// RegisterTypeDecoder registers the provided ValueDecoder for the provided type. -// -// The type will be used as provided, so a decoder can be registered for a type and a different -// decoder can be registered for a pointer to that type. -// -// If the given type is an interface, the decoder will be called when unmarshaling into a type that -// is that interface. It will not be called when unmarshaling into a non-interface type that -// implements the interface. To get the latter behavior, call RegisterHookDecoder instead. -// -// RegisterTypeDecoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { - r.typeDecoders.Store(valueType, dec) -} - -// RegisterKindEncoder registers the provided ValueEncoder for the provided kind. -// -// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For -// example, consider the type MyInt defined as -// -// type MyInt int32 -// -// To define an encoder for MyInt and int32, use RegisterKindEncoder like -// -// reg.RegisterKindEncoder(reflect.Int32, myEncoder) -// -// RegisterKindEncoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { - r.kindEncoders.Store(kind, enc) -} - -// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. 
-// -// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For -// example, consider the type MyInt defined as -// -// type MyInt int32 -// -// To define an decoder for MyInt and int32, use RegisterKindDecoder like -// -// reg.RegisterKindDecoder(reflect.Int32, myDecoder) -// -// RegisterKindDecoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { - r.kindDecoders.Store(kind, dec) -} - -// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will -// be called when marshaling a type if the type implements iface or a pointer to the type -// implements iface. If the provided type is not an interface -// (i.e. iface.Kind() != reflect.Interface), this method will panic. -// -// RegisterInterfaceEncoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) { - if iface.Kind() != reflect.Interface { - panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", iface, iface.Kind()) - panic(panicStr) - } - - for idx, encoder := range r.interfaceEncoders { - if encoder.i == iface { - r.interfaceEncoders[idx].ve = enc - return - } - } - - r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc}) -} - -// RegisterInterfaceDecoder registers an decoder for the provided interface type iface. This decoder will -// be called when unmarshaling into a type if the type implements iface or a pointer to the type -// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface), -// this method will panic. -// -// RegisterInterfaceDecoder should not be called concurrently with any other Registry method. -func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) { - if iface.Kind() != reflect.Interface { - panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", iface, iface.Kind()) - panic(panicStr) - } - - for idx, decoder := range r.interfaceDecoders { - if decoder.i == iface { - r.interfaceDecoders[idx].vd = dec - return - } - } - - r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec}) -} - -// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this -// mapping is decoding situations where an empty interface is used and a default type needs to be -// created and decoded into. -// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { - r.typeMap.Store(bt, rt) -} - -// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup -// order: -// -// 1. An encoder registered for the exact type. If the given type is an interface, an encoder -// registered using RegisterTypeEncoder for that interface will be selected. -// -// 2. 
An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type -// or by a pointer to the type. -// -// 3. An encoder registered using RegisterKindEncoder for the kind of value. -// -// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for -// concurrent use by multiple goroutines after all codecs and encoders are registered. -func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { - if valueType == nil { - return nil, ErrNoEncoder{Type: valueType} - } - enc, found := r.lookupTypeEncoder(valueType) - if found { - if enc == nil { - return nil, ErrNoEncoder{Type: valueType} - } - return enc, nil - } - - enc, found = r.lookupInterfaceEncoder(valueType, true) - if found { - return r.typeEncoders.LoadOrStore(valueType, enc), nil - } - - if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { - return r.storeTypeEncoder(valueType, v), nil - } - return nil, ErrNoEncoder{Type: valueType} -} - -func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { - return r.typeEncoders.LoadOrStore(rt, enc) -} - -func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { - return r.typeEncoders.Load(rt) -} - -func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if valueType == nil { - return nil, false - } - for _, ienc := range r.interfaceEncoders { - if valueType.Implements(ienc.i) { - return ienc.ve, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) - if !found { - defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) - } - return newCondAddrEncoder(ienc.ve, defaultEnc), true - } - } - return nil, false -} - -// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup -// order: -// -// 1. A decoder registered for the exact type. If the given type is an interface, a decoder -// registered using RegisterTypeDecoder for that interface will be selected. -// -// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by -// a pointer to the type. -// -// 3. A decoder registered using RegisterKindDecoder for the kind of value. -// -// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for -// concurrent use by multiple goroutines after all codecs and decoders are registered. 
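// Editor's sketch (not part of the patch): the lookup precedence documented
// above. An exact-type encoder wins over a kind encoder for the same value;
// the ID type and the asString helper are invented for illustration.
package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type ID uint32

// asString builds an encoder that writes the value as "<tag>:<n>".
func asString(tag string) bsoncodec.ValueEncoderFunc {
	return func(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
		return vw.WriteString(fmt.Sprintf("%s:%d", tag, val.Uint()))
	}
}

func main() {
	reg := bson.NewRegistry()
	reg.RegisterKindEncoder(reflect.Uint32, asString("kind"))
	reg.RegisterTypeEncoder(reflect.TypeOf(ID(0)), asString("type"))

	raw, err := bson.MarshalWithRegistry(reg, bson.M{"id": ID(7), "n": uint32(7)})
	if err != nil {
		panic(err)
	}
	// "id" hits the exact-type encoder ("type:7"); "n" falls back to the
	// kind encoder ("kind:7").
	fmt.Println(bson.Raw(raw))
}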
-func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { - if valueType == nil { - return nil, ErrNilType - } - dec, found := r.lookupTypeDecoder(valueType) - if found { - if dec == nil { - return nil, ErrNoDecoder{Type: valueType} - } - return dec, nil - } - - dec, found = r.lookupInterfaceDecoder(valueType, true) - if found { - return r.storeTypeDecoder(valueType, dec), nil - } - - if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { - return r.storeTypeDecoder(valueType, v), nil - } - return nil, ErrNoDecoder{Type: valueType} -} - -func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { - return r.typeDecoders.Load(valueType) -} - -func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { - return r.typeDecoders.LoadOrStore(typ, dec) -} - -func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { - for _, idec := range r.interfaceDecoders { - if valueType.Implements(idec.i) { - return idec.vd, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(valueType, false) - if !found { - defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) - } - return newCondAddrDecoder(idec.vd, defaultDec), true - } - } - return nil, false -} - -// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON -// type. If no type is found, ErrNoTypeMapEntry is returned. -// -// LookupTypeMapEntry should not be called concurrently with any other Registry method. -func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - v, ok := r.typeMap.Load(bt) - if v == nil || !ok { - return nil, ErrNoTypeMapEntry{Type: bt} - } - return v.(reflect.Type), nil -} - -type interfaceValueEncoder struct { - i reflect.Type - ve ValueEncoder -} - -type interfaceValueDecoder struct { - i reflect.Type - vd ValueDecoder -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go deleted file mode 100644 index 14c9fd25..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var defaultSliceCodec = NewSliceCodec() - -// SliceCodec is the Codec used for slice values. -// -// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To -// configure the slice encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
-//
-// For example, to configure a mongo.Client to marshal nil Go slices as empty
-// BSON arrays, use:
-//
-//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
-//		NilSliceAsEmpty: true,
-//	})
-//
-// See the deprecation notice for each field in SliceCodec for the corresponding
-// settings.
-type SliceCodec struct {
-	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
-	// BSON null.
-	//
-	// Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
-	EncodeNilAsEmpty bool
-}
-
-// NewSliceCodec returns a SliceCodec with options opts.
-//
-// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See
-// [SliceCodec] for more details.
-func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
-	sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
-
-	codec := SliceCodec{}
-	if sliceOpt.EncodeNilAsEmpty != nil {
-		codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty
-	}
-	return &codec
-}
-
-// EncodeValue is the ValueEncoder for slice types.
-func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Kind() != reflect.Slice {
-		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
-	}
-
-	if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
-		return vw.WriteNull()
-	}
-
-	// If we have a []byte we want to treat it as a binary instead of as an array.
-	if val.Type().Elem() == tByte {
-		byteSlice := make([]byte, val.Len())
-		reflect.Copy(reflect.ValueOf(byteSlice), val)
-		return vw.WriteBinary(byteSlice)
-	}
-
-	// If we have a []primitive.E we want to treat it as a document instead of as an array.
-	if val.Type() == tD || val.Type().ConvertibleTo(tD) {
-		d := val.Convert(tD).Interface().(primitive.D)
-
-		dw, err := vw.WriteDocument()
-		if err != nil {
-			return err
-		}
-
-		for _, e := range d {
-			err = encodeElement(ec, dw, e)
-			if err != nil {
-				return err
-			}
-		}
-
-		return dw.WriteDocumentEnd()
-	}
-
-	aw, err := vw.WriteArray()
-	if err != nil {
-		return err
-	}
-
-	elemType := val.Type().Elem()
-	encoder, err := ec.LookupEncoder(elemType)
-	if err != nil && elemType.Kind() != reflect.Interface {
-		return err
-	}
-
-	for idx := 0; idx < val.Len(); idx++ {
-		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx))
-		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
-			return lookupErr
-		}
-
-		vw, err := aw.WriteArrayElement()
-		if err != nil {
-			return err
-		}
-
-		if errors.Is(lookupErr, errInvalidValue) {
-			err = vw.WriteNull()
-			if err != nil {
-				return err
-			}
-			continue
-		}
-
-		err = currEncoder.EncodeValue(ec, vw, currVal)
-		if err != nil {
-			return err
-		}
-	}
-	return aw.WriteArrayEnd()
-}
-
-// DecodeValue is the ValueDecoder for slice types.
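A small sketch of the special cases EncodeValue handles above (assuming driver v1.x; the payload type is illustrative): a []byte field marshals as a BSON binary rather than an array, while other slice types marshal as BSON arrays.

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    )

    type payload struct {
    	Data []byte `bson:"data"` // encoded as BSON binary (subtype 0x00), not an array
    	Nums []int  `bson:"nums"` // encoded as a BSON array
    }

    func main() {
    	raw, err := bson.Marshal(payload{Data: []byte{0x01, 0x02}, Nums: []int{1, 2}})
    	if err != nil {
    		panic(err)
    	}
    	// Prints extended JSON: "data" as a $binary value, "nums" as [1, 2].
    	fmt.Println(bson.Raw(raw))
    }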
-func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) - return nil - case bsontype.String: - if sliceType := val.Type().Elem(); sliceType != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) - } - str, err := vr.ReadString() - if err != nil { - return err - } - byteStr := []byte(str) - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) - return nil - default: - return fmt.Errorf("cannot decode %v into a slice", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = defaultValueDecoders.decodeD - default: - elemsFunc = defaultValueDecoders.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go deleted file mode 100644 index a8f885a8..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// StringCodec is the Codec used for string values. -// -// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To -// override the default string encode and decode behavior, create a new registry -// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new -// encoder and decoder for strings. 
-// -// For example, -// -// reg := bson.NewRegistry() -// reg.RegisterKindEncoder(reflect.String, myStringEncoder) -// reg.RegisterKindDecoder(reflect.String, myStringDecoder) -type StringCodec struct { - // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. - // If false, a string made from the raw object ID bytes will be used. Defaults to true. - // - // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. - DecodeObjectIDAsHex bool -} - -var ( - defaultStringCodec = NewStringCodec() - - // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultStringCodec -) - -// NewStringCodec returns a StringCodec with options opts. -// -// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See -// [StringCodec] for more details. -func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { - stringOpt := bsonoptions.MergeStringCodecOptions(opts...) - return &StringCodec{*stringOpt.DecodeObjectIDAsHex} -} - -// EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.String { - return emptyValue, ValueDecoderError{ - Name: "StringDecodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: reflect.Zero(t), - } - } - - var str string - var err error - switch vr.Type() { - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - if sc.DecodeObjectIDAsHex { - str = oid.Hex() - } else { - // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. - byteArray := [12]byte(oid) - str = string(byteArray[:]) - } - case bsontype.Symbol: - str, err = vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} - } - str = string(data) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - - return reflect.ValueOf(str), nil -} - -// DecodeValue is the ValueDecoder for string types. 
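A sketch of the default DecodeObjectIDAsHex behavior described above (driver v1.x assumed; the out struct is illustrative): with the default codec, a BSON ObjectID decodes into a string field as its 24-character hex form.

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/primitive"
    )

    func main() {
    	oid := primitive.NewObjectID()
    	raw, _ := bson.Marshal(bson.M{"id": oid})

    	// DecodeObjectIDAsHex defaults to true, so the ObjectID arrives as hex.
    	var out struct {
    		ID string `bson:"id"`
    	}
    	if err := bson.Unmarshal(raw, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.ID == oid.Hex()) // true
    }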
-func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if !val.CanSet() || val.Kind() != reflect.String {
-		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
-	}
-
-	elem, err := sc.decodeType(dctx, vr, val.Type())
-	if err != nil {
-		return err
-	}
-
-	val.SetString(elem.String())
-	return nil
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
deleted file mode 100644
index f8d9690c..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
+++ /dev/null
@@ -1,736 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type.
-type DecodeError struct {
-	keys    []string
-	wrapped error
-}
-
-// Unwrap returns the underlying error
-func (de *DecodeError) Unwrap() error {
-	return de.wrapped
-}
-
-// Error implements the error interface.
-func (de *DecodeError) Error() string {
-	// The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the
-	// stack of BSON keys, so we call de.Keys(), which reverses them.
-	keyPath := strings.Join(de.Keys(), ".")
-	return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped)
-}
-
-// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down
-// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be
-// a string, the keys slice will be ["a", "b", "c"].
-func (de *DecodeError) Keys() []string {
-	reversedKeys := make([]string, 0, len(de.keys))
-	for idx := len(de.keys) - 1; idx >= 0; idx-- {
-		reversedKeys = append(reversedKeys, de.keys[idx])
-	}
-
-	return reversedKeys
-}
-
-// Zeroer allows custom struct types to implement a report of zero
-// state. All struct types that don't implement Zeroer or where IsZero
-// returns false are considered to be not zero.
-type Zeroer interface {
-	IsZero() bool
-}
-
-// StructCodec is the Codec used for struct values.
-//
-// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0.
-// To configure the struct encode and decode behavior, use the configuration
-// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
-// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode
-// and decode behavior for a mongo.Client, use
-// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
-//
-// For example, to configure a mongo.Client to omit zero-value structs when
-// using the "omitempty" struct tag, use:
-//
-//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
-//		OmitZeroStruct: true,
-//	})
-//
-// See the deprecation notice for each field in StructCodec for the corresponding
-// settings.
-type StructCodec struct { - cache sync.Map // map[reflect.Type]*structDescription - parser StructTagParser - - // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. - DecodeZeroStruct bool - - // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. - DecodeDeepZeroInline bool - - // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. - // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag - // option is set. - // - // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. - EncodeOmitDefaultStruct bool - - // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. - // - // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be - // supported in Go Driver 2.0. - AllowUnexportedFields bool - - // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is - // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The - // default value is true. - // - // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or - // options.BSONOptions.ErrorOnInlineDuplicates instead. - OverwriteDuplicatedInlinedFields bool -} - -var _ ValueEncoder = &StructCodec{} -var _ ValueDecoder = &StructCodec{} - -// NewStructCodec returns a StructCodec that uses p for struct tag parsing. -// -// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See -// [StructCodec] for more details. -func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { - if p == nil { - return nil, errors.New("a StructTagParser must be provided to NewStructCodec") - } - - structOpt := bsonoptions.MergeStructCodecOptions(opts...) - - codec := &StructCodec{ - parser: p, - } - - if structOpt.DecodeZeroStruct != nil { - codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct - } - if structOpt.DecodeDeepZeroInline != nil { - codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline - } - if structOpt.EncodeOmitDefaultStruct != nil { - codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct - } - if structOpt.OverwriteDuplicatedInlinedFields != nil { - codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields - } - if structOpt.AllowUnexportedFields != nil { - codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields - } - - return codec, nil -} - -// EncodeValue handles encoding generic struct types. 
-func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Struct { - return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) - if err != nil { - return err - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - var rv reflect.Value - for _, desc := range sd.fl { - if desc.inline == nil { - rv = val.Field(desc.idx) - } else { - rv, err = fieldByIndexErr(val, desc.inline) - if err != nil { - continue - } - } - - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - - if err != nil && !errors.Is(err, errInvalidValue) { - return err - } - - if errors.Is(err, errInvalidValue) { - if desc.omitEmpty { - continue - } - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - err = vw2.WriteNull() - if err != nil { - return err - } - continue - } - - if desc.encoder == nil { - return ErrNoEncoder{Type: rv.Type()} - } - - encoder := desc.encoder - - var empty bool - if cz, ok := encoder.(CodecZeroer); ok { - empty = cz.IsTypeZero(rv.Interface()) - } else if rv.Kind() == reflect.Interface { - // isEmpty will not treat an interface rv as an interface, so we need to check for the - // nil interface separately. - empty = rv.IsNil() - } else { - empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) - } - if desc.omitEmpty && empty { - continue - } - - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - - ectx := EncodeContext{ - Registry: ec.Registry, - MinSize: desc.minSize || ec.MinSize, - errorOnInlineDuplicates: ec.errorOnInlineDuplicates, - stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, - nilMapAsEmpty: ec.nilMapAsEmpty, - nilSliceAsEmpty: ec.nilSliceAsEmpty, - nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, - omitZeroStruct: ec.omitZeroStruct, - useJSONStructTags: ec.useJSONStructTags, - } - err = encoder.EncodeValue(ectx, vw2, rv) - if err != nil { - return err - } - } - - if sd.inlineMap >= 0 { - rv := val.Field(sd.inlineMap) - collisionFn := func(key string) bool { - _, exists := sd.fm[key] - return exists - } - - return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) - } - - return dw.WriteDocumentEnd() -} - -func newDecodeError(key string, original error) error { - var de *DecodeError - if !errors.As(original, &de) { - return &DecodeError{ - keys: []string{key}, - wrapped: original, - } - } - - de.keys = append(de.keys, key) - return de -} - -// DecodeValue implements the Codec interface. -// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. -// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
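A sketch of the decode contract stated just above (driver v1.x; the conf type is illustrative): existing map entries in the destination survive a decode and are extended with the incoming keys, since maps are not cleared by default.

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    )

    type conf struct {
    	Opts map[string]string `bson:"opts"`
    }

    func main() {
    	raw, _ := bson.Marshal(conf{Opts: map[string]string{"b": "2"}})

    	// Pre-populated destination: decoding extends the map rather than
    	// replacing it, per the DecodeValue documentation above.
    	dst := conf{Opts: map[string]string{"a": "1"}}
    	if err := bson.Unmarshal(raw, &dst); err != nil {
    		panic(err)
    	}
    	fmt.Println(dst.Opts) // map[a:1 b:2]
    }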
-func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Struct { - return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) - if err != nil { - return err - } - - if sc.DecodeZeroStruct || dc.zeroStructs { - val.Set(reflect.Zero(val.Type())) - } - if sc.DecodeDeepZeroInline && sd.inline { - val.Set(deepZero(val.Type())) - } - - var decoder ValueDecoder - var inlineMap reflect.Value - if sd.inlineMap >= 0 { - inlineMap = val.Field(sd.inlineMap) - decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) - if err != nil { - return err - } - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - for { - name, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - fd, exists := sd.fm[name] - if !exists { - // if the original name isn't found in the struct description, try again with the name in lowercase - // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field - // names - fd, exists = sd.fm[strings.ToLower(name)] - } - - if !exists { - if sd.inlineMap < 0 { - // The encoding/json package requires a flag to return on error for non-existent fields. - // This functionality seems appropriate for the struct codec. - err = vr.Skip() - if err != nil { - return err - } - continue - } - - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - - elem := reflect.New(inlineMap.Type().Elem()).Elem() - dc.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - inlineMap.SetMapIndex(reflect.ValueOf(name), elem) - continue - } - - var field reflect.Value - if fd.inline == nil { - field = val.Field(fd.idx) - } else { - field, err = getInlineField(val, fd.inline) - if err != nil { - return err - } - } - - if !field.CanSet() { // Being settable is a super set of being addressable. 
- innerErr := fmt.Errorf("field %v is not settable", field) - return newDecodeError(fd.name, innerErr) - } - if field.Kind() == reflect.Ptr && field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - } - field = field.Addr() - - dctx := DecodeContext{ - Registry: dc.Registry, - Truncate: fd.truncate || dc.Truncate, - defaultDocumentType: dc.defaultDocumentType, - binaryAsSlice: dc.binaryAsSlice, - useJSONStructTags: dc.useJSONStructTags, - useLocalTimeZone: dc.useLocalTimeZone, - zeroMaps: dc.zeroMaps, - zeroStructs: dc.zeroStructs, - } - - if fd.decoder == nil { - return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) - } - - err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - } - - return nil -} - -func isEmpty(v reflect.Value, omitZeroStruct bool) bool { - kind := v.Kind() - if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { - return v.Interface().(Zeroer).IsZero() - } - switch kind { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Struct: - if !omitZeroStruct { - return false - } - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() - } - numField := vt.NumField() - for i := 0; i < numField; i++ { - ff := vt.Field(i) - if ff.PkgPath != "" && !ff.Anonymous { - continue // Private field - } - if !isEmpty(v.Field(i), omitZeroStruct) { - return false - } - } - return true - } - return !v.IsValid() || v.IsZero() -} - -type structDescription struct { - fm map[string]fieldDescription - fl []fieldDescription - inlineMap int - inline bool -} - -type fieldDescription struct { - name string // BSON key name - fieldName string // struct field name - idx int - omitEmpty bool - minSize bool - truncate bool - inline []int - encoder ValueEncoder - decoder ValueDecoder -} - -type byIndex []fieldDescription - -func (bi byIndex) Len() int { return len(bi) } - -func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } - -func (bi byIndex) Less(i, j int) bool { - // If a field is inlined, its index in the top level struct is stored at inline[0] - iIdx, jIdx := bi[i].idx, bi[j].idx - if len(bi[i].inline) > 0 { - iIdx = bi[i].inline[0] - } - if len(bi[j].inline) > 0 { - jIdx = bi[j].inline[0] - } - if iIdx != jIdx { - return iIdx < jIdx - } - for k, biik := range bi[i].inline { - if k >= len(bi[j].inline) { - return false - } - if biik != bi[j].inline[k] { - return biik < bi[j].inline[k] - } - } - return len(bi[i].inline) < len(bi[j].inline) -} - -func (sc *StructCodec) describeStruct( - r *Registry, - t reflect.Type, - useJSONStructTags bool, - errorOnDuplicates bool, -) (*structDescription, error) { - // We need to analyze the struct, including getting the tags, collecting - // information about inlining, and create a map of the field name to the field. - if v, ok := sc.cache.Load(t); ok { - return v.(*structDescription), nil - } - // TODO(charlie): Only describe the struct once when called - // concurrently with the same type. 
-	ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates)
-	if err != nil {
-		return nil, err
-	}
-	if v, loaded := sc.cache.LoadOrStore(t, ds); loaded {
-		ds = v.(*structDescription)
-	}
-	return ds, nil
-}
-
-func (sc *StructCodec) describeStructSlow(
-	r *Registry,
-	t reflect.Type,
-	useJSONStructTags bool,
-	errorOnDuplicates bool,
-) (*structDescription, error) {
-	numFields := t.NumField()
-	sd := &structDescription{
-		fm:        make(map[string]fieldDescription, numFields),
-		fl:        make([]fieldDescription, 0, numFields),
-		inlineMap: -1,
-	}
-
-	var fields []fieldDescription
-	for i := 0; i < numFields; i++ {
-		sf := t.Field(i)
-		if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
-			// field is private or unexported fields aren't allowed, ignore
-			continue
-		}
-
-		sfType := sf.Type
-		encoder, err := r.LookupEncoder(sfType)
-		if err != nil {
-			encoder = nil
-		}
-		decoder, err := r.LookupDecoder(sfType)
-		if err != nil {
-			decoder = nil
-		}
-
-		description := fieldDescription{
-			fieldName: sf.Name,
-			idx:       i,
-			encoder:   encoder,
-			decoder:   decoder,
-		}
-
-		var stags StructTags
-		// If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser
-		// instead of the parser defined on the codec.
-		if useJSONStructTags {
-			stags, err = JSONFallbackStructTagParser.ParseStructTags(sf)
-		} else {
-			stags, err = sc.parser.ParseStructTags(sf)
-		}
-		if err != nil {
-			return nil, err
-		}
-		if stags.Skip {
-			continue
-		}
-		description.name = stags.Name
-		description.omitEmpty = stags.OmitEmpty
-		description.minSize = stags.MinSize
-		description.truncate = stags.Truncate
-
-		if stags.Inline {
-			sd.inline = true
-			switch sfType.Kind() {
-			case reflect.Map:
-				if sd.inlineMap >= 0 {
-					return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
-				}
-				if sfType.Key() != tString {
-					return nil, errors.New("(struct " + t.String() + ") inline map must have string keys")
-				}
-				sd.inlineMap = description.idx
-			case reflect.Ptr:
-				sfType = sfType.Elem()
-				if sfType.Kind() != reflect.Struct {
-					return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
-				}
-				fallthrough
-			case reflect.Struct:
-				inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates)
-				if err != nil {
-					return nil, err
-				}
-				for _, fd := range inlinesf.fl {
-					if fd.inline == nil {
-						fd.inline = []int{i, fd.idx}
-					} else {
-						fd.inline = append([]int{i}, fd.inline...)
-					}
-					fields = append(fields, fd)
-
-				}
-			default:
-				return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
-			}
-			continue
-		}
-		fields = append(fields, description)
-	}
-
-	// Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name
-	sort.Slice(fields, func(i, j int) bool {
-		x := fields
-		// sort field by name, breaking ties with depth, then
-		// breaking ties with index sequence.
-		if x[i].name != x[j].name {
-			return x[i].name < x[j].name
-		}
-		if len(x[i].inline) != len(x[j].inline) {
-			return len(x[i].inline) < len(x[j].inline)
-		}
-		return byIndex(x).Less(i, j)
-	})
-
-	for advance, i := 0, 0; i < len(fields); i += advance {
-		// One iteration per name.
-		// Find the sequence of fields with the name of this first field.
- fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - sd.fl = append(sd.fl, fi) - sd.fm[name] = fi - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { - return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) - } - sd.fl = append(sd.fl, dominant) - sd.fm[name] = dominant - } - - sort.Sort(byIndex(sd.fl)) - - return sd, nil -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's inlining rules. If there are multiple top-level -// fields, the boolean will be false: This condition is an error in Go -// and we skip all the fields. -func dominantField(fields []fieldDescription) (fieldDescription, bool) { - // The fields are sorted in increasing index-length order, then by presence of tag. - // That means that the first field is the dominant one. We need only check - // for error cases: two fields at top level. - if len(fields) > 1 && - len(fields[0].inline) == len(fields[1].inline) { - return fieldDescription{}, false - } - return fields[0], true -} - -func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { - defer func() { - if recovered := recover(); recovered != nil { - switch r := recovered.(type) { - case string: - err = fmt.Errorf("%s", r) - case error: - err = r - } - } - }() - - result = v.FieldByIndex(index) - return -} - -func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { - field, err := fieldByIndexErr(val, index) - if err == nil { - return field, nil - } - - // if parent of this element doesn't exist, fix its parent - inlineParent := index[:len(index)-1] - var fParent reflect.Value - if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { - fParent, err = getInlineField(val, inlineParent) - if err != nil { - return fParent, err - } - } - fParent.Set(reflect.New(fParent.Type().Elem())) - - return fieldByIndexErr(val, index) -} - -// DeepZero returns recursive zero object -func deepZero(st reflect.Type) (result reflect.Value) { - if st.Kind() == reflect.Struct { - numField := st.NumField() - for i := 0; i < numField; i++ { - if result == emptyValue { - result = reflect.Indirect(reflect.New(st)) - } - f := result.Field(i) - if f.CanInterface() { - if f.Type().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) - } - } - } - } - return result -} - -// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside -func recursivePointerTo(v reflect.Value) reflect.Value { - v = reflect.Indirect(v) - result := reflect.New(v.Type()) - if v.Kind() == reflect.Struct { - for i := 0; i < v.NumField(); i++ { - if f := v.Field(i); f.Kind() == reflect.Ptr { - if f.Elem().Kind() == reflect.Struct { - result.Elem().Field(i).Set(recursivePointerTo(f)) - } - } - } - } - - return result -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go deleted file mode 100644 index 18d85bfb..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"reflect"
-	"strings"
-)
-
-// StructTagParser returns the struct tags for a given struct field.
-//
-// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
-type StructTagParser interface {
-	ParseStructTags(reflect.StructField) (StructTags, error)
-}
-
-// StructTagParserFunc is an adapter that allows a generic function to be used
-// as a StructTagParser.
-//
-// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
-type StructTagParserFunc func(reflect.StructField) (StructTags, error)
-
-// ParseStructTags implements the StructTagParser interface.
-func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
-	return stpf(sf)
-}
-
-// StructTags represents the struct tag fields that the StructCodec uses during
-// the encoding and decoding process.
-//
-// In the case of a struct, the lowercased field name is used as the key for each exported
-// field but this behavior may be changed using a struct tag. The tag may also contain flags to
-// adjust the marshalling behavior for the field.
-//
-// The properties are defined below:
-//
-//	OmitEmpty	Only include the field if it's not set to the zero value for the type or to
-//			empty slices or maps.
-//
-//	MinSize		Marshal an integer of a type larger than 32 bits value as an int32, if that's
-//			feasible while preserving the numeric value.
-//
-//	Truncate	When unmarshaling a BSON double, it is permitted to lose precision to fit within
-//			a float32.
-//
-//	Inline		Inline the field, which must be a struct or a map, causing all of its fields
-//			or keys to be processed as if they were part of the outer struct. For maps,
-//			keys must not conflict with the bson keys of other struct fields.
-//
-//	Skip		This struct field should be skipped. This is usually denoted by parsing a "-"
-//			for the name.
-//
-// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
-type StructTags struct {
-	Name      string
-	OmitEmpty bool
-	MinSize   bool
-	Truncate  bool
-	Inline    bool
-	Skip      bool
-}
-
-// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
-// It will handle the bson struct tag. See the documentation for StructTags to see
-// what each of the returned fields means.
-//
-// If there is no name in the struct tag fields, the struct field name is lowercased.
-// The tag formats accepted are:
-//
-//	"[<key>][,<flag1>[,<flag2>]]"
-//
-//	`(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// An example:
-//
-//	type T struct {
-//		A bool
-//		B int    "myb"
-//		C string "myc,omitempty"
-//		D string `bson:",omitempty" json:"jsonkey"`
-//		E int64  ",minsize"
-//		F int64  "myf,omitempty,minsize"
-//	}
-//
-// A struct tag either consisting entirely of '-' or with a bson key with a
-// value consisting entirely of '-' will return a StructTags with Skip true and
-// the remaining fields will be their default values.
-//
-// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
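A sketch of this tag grammar in practice (driver v1.x; T and the field names are illustrative), showing a named key with omitempty, a flag-only tag that keeps the lowercased field name, and the skip marker:

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    )

    type T struct {
    	A string `bson:"myA,omitempty"` // key "myA"; omitted when zero
    	B int64  `bson:",minsize"`      // key "b" (lowercased field name); stored as int32 when it fits
    	C string `bson:"-"`             // skipped entirely
    }

    func main() {
    	raw, _ := bson.Marshal(T{B: 7})
    	// Only "b" remains: "myA" is omitted (zero value), "C" is skipped,
    	// and 7 is written as a 32-bit integer because of minsize.
    	fmt.Println(bson.Raw(raw))
    }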
-var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
-	key := strings.ToLower(sf.Name)
-	tag, ok := sf.Tag.Lookup("bson")
-	if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
-		tag = string(sf.Tag)
-	}
-	return parseTags(key, tag)
-}
-
-func parseTags(key string, tag string) (StructTags, error) {
-	var st StructTags
-	if tag == "-" {
-		st.Skip = true
-		return st, nil
-	}
-
-	for idx, str := range strings.Split(tag, ",") {
-		if idx == 0 && str != "" {
-			key = str
-		}
-		switch str {
-		case "omitempty":
-			st.OmitEmpty = true
-		case "minsize":
-			st.MinSize = true
-		case "truncate":
-			st.Truncate = true
-		case "inline":
-			st.Inline = true
-		}
-	}
-
-	st.Name = key
-
-	return st, nil
-}
-
-// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser
-// but will also fall back to parsing the json tag on a field where the
-// bson tag isn't available.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and
-// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
-var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
-	key := strings.ToLower(sf.Name)
-	tag, ok := sf.Tag.Lookup("bson")
-	if !ok {
-		tag, ok = sf.Tag.Lookup("json")
-	}
-	if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
-		tag = string(sf.Tag)
-	}
-
-	return parseTags(key, tag)
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
deleted file mode 100644
index 22fb762c..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"fmt"
-	"reflect"
-	"time"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-	"go.mongodb.org/mongo-driver/bson/primitive"
-)
-
-const (
-	timeFormatString = "2006-01-02T15:04:05.999Z07:00"
-)
-
-// TimeCodec is the Codec used for time.Time values.
-//
-// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0.
-// To configure the time.Time encode and decode behavior, use the configuration
-// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
-// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode
-// and decode behavior for a mongo.Client, use
-// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
-//
-// For example, to configure a mongo.Client to decode BSON date-time values in
-// the local timezone, use:
-//
-//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
-//		UseLocalTimeZone: true,
-//	})
-//
-// See the deprecation notice for each field in TimeCodec for the corresponding
-// settings.
-type TimeCodec struct {
-	// UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
-	//
-	// Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone
-	// instead.
-	UseLocalTimeZone bool
-}
-
-var (
-	defaultTimeCodec = NewTimeCodec()
-
-	// Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used
-	// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
-	_ typeDecoder = defaultTimeCodec
-)
-
-// NewTimeCodec returns a TimeCodec with options opts.
-//
-// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See
-// [TimeCodec] for more details.
-func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
-	timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)
-
-	codec := TimeCodec{}
-	if timeOpt.UseLocalTimeZone != nil {
-		codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone
-	}
-	return &codec
-}
-
-func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
-	if t != tTime {
-		return emptyValue, ValueDecoderError{
-			Name:     "TimeDecodeValue",
-			Types:    []reflect.Type{tTime},
-			Received: reflect.Zero(t),
-		}
-	}
-
-	var timeVal time.Time
-	switch vrType := vr.Type(); vrType {
-	case bsontype.DateTime:
-		dt, err := vr.ReadDateTime()
-		if err != nil {
-			return emptyValue, err
-		}
-		timeVal = time.Unix(dt/1000, dt%1000*1000000)
-	case bsontype.String:
-		// assume strings are in the timeFormatString format
-		timeStr, err := vr.ReadString()
-		if err != nil {
-			return emptyValue, err
-		}
-		timeVal, err = time.Parse(timeFormatString, timeStr)
-		if err != nil {
-			return emptyValue, err
-		}
-	case bsontype.Int64:
-		i64, err := vr.ReadInt64()
-		if err != nil {
-			return emptyValue, err
-		}
-		timeVal = time.Unix(i64/1000, i64%1000*1000000)
-	case bsontype.Timestamp:
-		t, _, err := vr.ReadTimestamp()
-		if err != nil {
-			return emptyValue, err
-		}
-		timeVal = time.Unix(int64(t), 0)
-	case bsontype.Null:
-		if err := vr.ReadNull(); err != nil {
-			return emptyValue, err
-		}
-	case bsontype.Undefined:
-		if err := vr.ReadUndefined(); err != nil {
-			return emptyValue, err
-		}
-	default:
-		return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
-	}
-
-	if !tc.UseLocalTimeZone && !dc.useLocalTimeZone {
-		timeVal = timeVal.UTC()
-	}
-	return reflect.ValueOf(timeVal), nil
-}
-
-// DecodeValue is the ValueDecoderFunc for time.Time.
-func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if !val.CanSet() || val.Type() != tTime {
-		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
-	}
-
-	elem, err := tc.decodeType(dc, vr, tTime)
-	if err != nil {
-		return err
-	}
-
-	val.Set(elem)
-	return nil
-}
-
-// EncodeValue is the ValueEncoderFunc for time.Time.
-func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tTime {
-		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
-	}
-	tt := val.Interface().(time.Time)
-	dt := primitive.NewDateTimeFromTime(tt)
-	return vw.WriteDateTime(int64(dt))
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
deleted file mode 100644
index 6ade17b7..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"encoding/json"
-	"net/url"
-	"reflect"
-	"time"
-
-	"go.mongodb.org/mongo-driver/bson/primitive"
-	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
-)
-
-var tBool = reflect.TypeOf(false)
-var tFloat64 = reflect.TypeOf(float64(0))
-var tInt32 = reflect.TypeOf(int32(0))
-var tInt64 = reflect.TypeOf(int64(0))
-var tString = reflect.TypeOf("")
-var tTime = reflect.TypeOf(time.Time{})
-
-var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
-var tByteSlice = reflect.TypeOf([]byte(nil))
-var tByte = reflect.TypeOf(byte(0x00))
-var tURL = reflect.TypeOf(url.URL{})
-var tJSONNumber = reflect.TypeOf(json.Number(""))
-
-var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem()
-var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
-var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
-var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
-var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()
-var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem()
-
-var tBinary = reflect.TypeOf(primitive.Binary{})
-var tUndefined = reflect.TypeOf(primitive.Undefined{})
-var tOID = reflect.TypeOf(primitive.ObjectID{})
-var tDateTime = reflect.TypeOf(primitive.DateTime(0))
-var tNull = reflect.TypeOf(primitive.Null{})
-var tRegex = reflect.TypeOf(primitive.Regex{})
-var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
-var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
-var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
-var tSymbol = reflect.TypeOf(primitive.Symbol(""))
-var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
-var tDecimal = reflect.TypeOf(primitive.Decimal128{})
-var tMinKey = reflect.TypeOf(primitive.MinKey{})
-var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
-var tD = reflect.TypeOf(primitive.D{})
-var tA = reflect.TypeOf(primitive.A{})
-var tE = reflect.TypeOf(primitive.E{})
-
-var tCoreDocument = reflect.TypeOf(bsoncore.Document{})
-var tCoreArray = reflect.TypeOf(bsoncore.Array{})
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
deleted file mode 100644
index 85254727..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"fmt"
-	"math"
-	"reflect"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-// UIntCodec is the Codec used for uint values.
-//
-// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To
-// configure the uint encode and decode behavior, use the configuration methods
-// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
-// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and
-// decode behavior for a mongo.Client, use
-// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
-// -// For example, to configure a mongo.Client to marshal Go uint values as the -// minimum BSON int size that can represent the value, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// IntMinSize: true, -// }) -// -// See the deprecation notice for each field in UIntCodec for the corresponding -// settings. -type UIntCodec struct { - // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the - // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. - // - // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. - EncodeToMinSize bool -} - -var ( - defaultUIntCodec = NewUIntCodec() - - // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used - // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. - _ typeDecoder = defaultUIntCodec -) - -// NewUIntCodec returns a UIntCodec with options opts. -// -// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See -// [UIntCodec] for more details. -func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { - uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) - - codec := UIntCodec{} - if uintOpt.EncodeToMinSize != nil { - codec.EncodeToMinSize = *uintOpt.EncodeToMinSize - } - return &codec -} - -// EncodeValue is the ValueEncoder for uint types. -func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - - // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 - useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) - - if u64 <= math.MaxInt32 && useMinSize { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return emptyValue, fmt.Errorf("%d overflows uint8", i64) - } 
- - return reflect.ValueOf(uint8(i64)), nil - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return emptyValue, fmt.Errorf("%d overflows uint16", i64) - } - - return reflect.ValueOf(uint16(i64)), nil - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return emptyValue, fmt.Errorf("%d overflows uint32", i64) - } - - return reflect.ValueOf(uint32(i64)), nil - case reflect.Uint64: - if i64 < 0 { - return emptyValue, fmt.Errorf("%d overflows uint64", i64) - } - - return reflect.ValueOf(uint64(i64)), nil - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return emptyValue, fmt.Errorf("%d overflows uint", i64) - } - - return reflect.ValueOf(uint(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: reflect.Zero(t), - } - } -} - -// DecodeValue is the ValueDecoder for uint types. -func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - elem, err := uic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetUint(elem.Uint()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go deleted file mode 100644 index 996bd171..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type ByteSliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -} - -// ByteSliceCodec creates a new *ByteSliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func ByteSliceCodec() *ByteSliceCodecOptions { - return &ByteSliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { - bs.EncodeNilAsEmpty = &b - return bs -} - -// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. 
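The Merge* helpers in the bsonoptions files below all follow the same last-one-wins pattern; a sketch with the ByteSliceCodecOptions variant (the option values are illustrative):

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson/bsonoptions"
    )

    func main() {
    	a := bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(false)
    	b := bsonoptions.ByteSliceCodec().SetEncodeNilAsEmpty(true)

    	// Later options win: merged.EncodeNilAsEmpty comes from b.
    	merged := bsonoptions.MergeByteSliceCodecOptions(a, b)
    	fmt.Println(*merged.EncodeNilAsEmpty) // true
    }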
-func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions {
-	bs := ByteSliceCodec()
-	for _, opt := range opts {
-		if opt == nil {
-			continue
-		}
-		if opt.EncodeNilAsEmpty != nil {
-			bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
-		}
-	}
-
-	return bs
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
deleted file mode 100644
index c40973c8..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2022-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-// Package bsonoptions defines the optional configurations for the BSON codecs.
-package bsonoptions
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
deleted file mode 100644
index f522c7e0..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsonoptions
-
-// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
-//
-// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
-// and unmarshal behavior instead.
-type EmptyInterfaceCodecOptions struct {
-	DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
-}
-
-// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
-//
-// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
-// and unmarshal behavior instead.
-func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
-	return &EmptyInterfaceCodecOptions{}
-}
-
-// SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
-func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
-	e.DecodeBinaryAsSlice = &b
-	return e
-}
-
-// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
-//
-// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
-// single options struct instead.
-func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { - e := EmptyInterfaceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeBinaryAsSlice != nil { - e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice - } - } - - return e -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go deleted file mode 100644 index a7a7c1d9..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// MapCodecOptions represents all possible options for map encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type MapCodecOptions struct { - DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. - EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. - // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must - // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a - // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the - // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override - // TextMarshaler/TextUnmarshaler. Defaults to false. - EncodeKeysWithStringer *bool -} - -// MapCodec creates a new *MapCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func MapCodec() *MapCodecOptions { - return &MapCodecOptions{} -} - -// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { - t.DecodeZerosMap = &b - return t -} - -// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { - t.EncodeNilAsEmpty = &b - return t -} - -// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the -// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key -// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with -// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer -// will override TextMarshaler/TextUnmarshaler. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. 
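The same pattern covers the MapCodecOptions knobs deleted here: Decoder.ZeroMaps replaces SetDecodeZerosMap, while Encoder.NilMapAsEmpty and Encoder.StringifyMapKeysWithFmt replace the encoder-side setters. A hedged sketch of the decoder half; the helper name and package are ours:

package bsonmigrate

import (
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// decodeReusingMap is an illustrative helper: ZeroMaps clears any existing
// keys in dst before decoding, matching MapCodecOptions.SetDecodeZerosMap(true).
func decodeReusingMap(doc []byte, dst map[string]string) error {
	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(doc))
	if err != nil {
		return err
	}
	dec.ZeroMaps()
	return dec.Decode(&dst)
}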
-func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { - t.EncodeKeysWithStringer = &b - return t -} - -// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { - s := MapCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeZerosMap != nil { - s.DecodeZerosMap = opt.DecodeZerosMap - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - if opt.EncodeKeysWithStringer != nil { - s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go deleted file mode 100644 index 3c1e4f35..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// SliceCodecOptions represents all possible options for slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type SliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -} - -// SliceCodec creates a new *SliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func SliceCodec() *SliceCodecOptions { - return &SliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. -func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { - s.EncodeNilAsEmpty = &b - return s -} - -// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { - s := SliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go deleted file mode 100644 index f8b76f99..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultDecodeOIDAsHex = true - -// StringCodecOptions represents all possible options for string encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type StringCodecOptions struct { - DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. -} - -// StringCodec creates a new *StringCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func StringCodec() *StringCodecOptions { - return &StringCodecOptions{} -} - -// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made -// from the raw object ID bytes will be used. Defaults to true. -// -// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. -func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { - t.DecodeObjectIDAsHex = &b - return t -} - -// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { - s := &StringCodecOptions{&defaultDecodeOIDAsHex} - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeObjectIDAsHex != nil { - s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go deleted file mode 100644 index 1cbfa32e..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultOverwriteDuplicatedInlinedFields = true - -// StructCodecOptions represents all possible options for struct encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type StructCodecOptions struct { - DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. - DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. - EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. - AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. - OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. 
-}
-
-// StructCodec creates a new *StructCodecOptions
-//
-// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
-// and unmarshal behavior instead.
-func StructCodec() *StructCodecOptions {
-	return &StructCodecOptions{}
-}
-
-// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
-func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
-	t.DecodeZeroStruct = &b
-	return t
-}
-
-// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
-//
-// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
-func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
-	t.DecodeDeepZeroInline = &b
-	return t
-}
-
-// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
-// its values set to their default value. Defaults to false.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
-func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
-	t.EncodeOmitDefaultStruct = &b
-	return t
-}
-
-// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the
-// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
-// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
-// there are duplicate keys after the struct is inlined. Defaults to true.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
-func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
-	t.OverwriteDuplicatedInlinedFields = &b
-	return t
-}
-
-// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
-//
-// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
-// supported in Go Driver 2.0.
-func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
-	t.AllowUnexportedFields = &b
-	return t
-}
-
-// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
-//
-// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
-// single options struct instead.
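For the struct codec, the deprecation notes above name Encoder.OmitZeroStruct, Encoder.ErrorOnInlineDuplicates, and Decoder.ZeroStructs as the replacements. A minimal sketch wiring up the encoder side; the helper name and package are ours, and mongo-driver v1.13+ is assumed:

package bsonmigrate

import (
	"bytes"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// newStructEncoder is an illustrative helper that configures an Encoder the
// way the deleted StructCodecOptions setters did.
func newStructEncoder(buf *bytes.Buffer) (*bson.Encoder, error) {
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		return nil, err
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		return nil, err
	}
	enc.OmitZeroStruct()          // was StructCodecOptions.SetEncodeOmitDefaultStruct(true)
	enc.ErrorOnInlineDuplicates() // was SetOverwriteDuplicatedInlinedFields(false)
	return enc, nil
}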
-func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { - s := &StructCodecOptions{ - OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, - } - for _, opt := range opts { - if opt == nil { - continue - } - - if opt.DecodeZeroStruct != nil { - s.DecodeZeroStruct = opt.DecodeZeroStruct - } - if opt.DecodeDeepZeroInline != nil { - s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline - } - if opt.EncodeOmitDefaultStruct != nil { - s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct - } - if opt.OverwriteDuplicatedInlinedFields != nil { - s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields - } - if opt.AllowUnexportedFields != nil { - s.AllowUnexportedFields = opt.AllowUnexportedFields - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go deleted file mode 100644 index 3f38433d..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// TimeCodecOptions represents all possible options for time.Time encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type TimeCodecOptions struct { - UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. -} - -// TimeCodec creates a new *TimeCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func TimeCodec() *TimeCodecOptions { - return &TimeCodecOptions{} -} - -// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. -func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { - t.UseLocalTimeZone = &b - return t -} - -// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { - t := TimeCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.UseLocalTimeZone != nil { - t.UseLocalTimeZone = opt.UseLocalTimeZone - } - } - - return t -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go deleted file mode 100644 index 5091e4d9..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// UIntCodecOptions represents all possible options for uint encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type UIntCodecOptions struct { - EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. -} - -// UIntCodec creates a new *UIntCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func UIntCodec() *UIntCodecOptions { - return &UIntCodecOptions{} -} - -// SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. -func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { - u.EncodeToMinSize = &b - return u -} - -// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { - u := UIntCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeToMinSize != nil { - u.EncodeToMinSize = opt.EncodeToMinSize - } - } - - return u -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go deleted file mode 100644 index 1e25570b..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "errors" - "fmt" - "io" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// Copier is a type that allows copying between ValueReaders, ValueWriters, and -// []byte values. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -type Copier struct{} - -// NewCopier creates a new copier with the given registry. If a nil registry is provided -// a default registry is used. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func NewCopier() Copier { - return Copier{} -} - -// CopyDocument handles copying a document from src to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func CopyDocument(dst ValueWriter, src ValueReader) error { - return Copier{}.CopyDocument(dst, src) -} - -// CopyDocument handles copying one document from the src to the dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. 
-func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { - dr, err := src.ReadDocument() - if err != nil { - return err - } - - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - return c.copyDocumentCore(dw, dr) -} - -// CopyArrayFromBytes copies the values from a BSON array represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { - aw, err := dst.WriteArray() - if err != nil { - return err - } - - err = c.CopyBytesToArrayWriter(aw, src) - if err != nil { - return err - } - - return aw.WriteArrayEnd() -} - -// CopyDocumentFromBytes copies the values from a BSON document represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - err = c.CopyBytesToDocumentWriter(dw, src) - if err != nil { - return err - } - - return dw.WriteDocumentEnd() -} - -type writeElementFn func(key string) (ValueWriter, error) - -// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an -// ArrayWriter. -// -// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go -// Driver 2.0. -func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { - wef := func(_ string) (ValueWriter, error) { - return dst.WriteArrayElement() - } - - return c.copyBytesToValueWriter(src, wef) -} - -// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a -// DocumentWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { - wef := func(key string) (ValueWriter, error) { - return dst.WriteDocumentElement(key) - } - - return c.copyBytesToValueWriter(src, wef) -} - -func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. - length, rem, ok := bsoncore.ReadLength(src) - if !ok { - return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) - } - if len(src) < int(length) { - return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length) - } - rem = rem[:length-4] - - var t bsontype.Type - var key string - var val bsoncore.Value - for { - t, rem, ok = bsoncore.ReadType(rem) - if !ok { - return io.EOF - } - if t == bsontype.Type(0) { - if len(rem) != 0 { - return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) - } - break - } - - key, rem, ok = bsoncore.ReadKey(rem) - if !ok { - return fmt.Errorf("invalid key found. remaining bytes=%v", rem) - } - - // write as either array element or document element using writeElementFn - vw, err := wef(key) - if err != nil { - return err - } - - val, rem, ok = bsoncore.ReadValue(rem, t) - if !ok { - return fmt.Errorf("not enough bytes available to read type. 
bytes=%d type=%s", len(rem), t) - } - err = c.CopyValueFromBytes(vw, t, val.Data) - if err != nil { - return err - } - } - return nil -} - -// CopyDocumentToBytes copies an entire document from the ValueReader and -// returns it as bytes. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { - return c.AppendDocumentBytes(nil, src) -} - -// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will -// append the result to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.CopyDocument(vw, src) - dst = vw.buf - return dst, err -} - -// AppendArrayBytes copies an array from the ValueReader to dst. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.copyArray(vw, src) - dst = vw.buf - return dst, err -} - -// CopyValueFromBytes will write the value represtend by t and src to dst. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. -func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { - if wvb, ok := dst.(BytesWriter); ok { - return wvb.WriteValueBytes(t, src) - } - - vr := vrPool.Get().(*valueReader) - defer vrPool.Put(vr) - - vr.reset(src) - vr.pushElement(t) - - return c.CopyValue(dst, vr) -} - -// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a -// []byte. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. -func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { - return c.AppendValueBytes(nil, src) -} - -// AppendValueBytes functions the same as CopyValueToBytes, but will append the -// result to dst. -// -// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go -// Driver 2.0. -func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { - if br, ok := src.(BytesReader); ok { - return br.ReadValueBytes(dst) - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - start := len(dst) - - vw.reset(dst) - vw.push(mElement) - - err := c.CopyValue(vw, src) - if err != nil { - return 0, dst, err - } - - return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil -} - -// CopyValue will copy a single value from src to dst. -// -// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. 
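The Copier deprecations above name direct replacements for single-value round-trips: bson.MarshalValue and bson.UnmarshalValue. A minimal sketch of that replacement path:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// Replaces Copier.CopyValueToBytes: marshal a single value to raw BSON.
	t, raw, err := bson.MarshalValue("hello")
	if err != nil {
		panic(err)
	}

	// Replaces Copier.CopyValueFromBytes: unmarshal the raw value back.
	var s string
	if err := bson.UnmarshalValue(t, raw, &s); err != nil {
		panic(err)
	}
	fmt.Println(t, s) // string hello
}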
-func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { - var err error - switch src.Type() { - case bsontype.Double: - var f64 float64 - f64, err = src.ReadDouble() - if err != nil { - break - } - err = dst.WriteDouble(f64) - case bsontype.String: - var str string - str, err = src.ReadString() - if err != nil { - return err - } - err = dst.WriteString(str) - case bsontype.EmbeddedDocument: - err = c.CopyDocument(dst, src) - case bsontype.Array: - err = c.copyArray(dst, src) - case bsontype.Binary: - var data []byte - var subtype byte - data, subtype, err = src.ReadBinary() - if err != nil { - break - } - err = dst.WriteBinaryWithSubtype(data, subtype) - case bsontype.Undefined: - err = src.ReadUndefined() - if err != nil { - break - } - err = dst.WriteUndefined() - case bsontype.ObjectID: - var oid primitive.ObjectID - oid, err = src.ReadObjectID() - if err != nil { - break - } - err = dst.WriteObjectID(oid) - case bsontype.Boolean: - var b bool - b, err = src.ReadBoolean() - if err != nil { - break - } - err = dst.WriteBoolean(b) - case bsontype.DateTime: - var dt int64 - dt, err = src.ReadDateTime() - if err != nil { - break - } - err = dst.WriteDateTime(dt) - case bsontype.Null: - err = src.ReadNull() - if err != nil { - break - } - err = dst.WriteNull() - case bsontype.Regex: - var pattern, options string - pattern, options, err = src.ReadRegex() - if err != nil { - break - } - err = dst.WriteRegex(pattern, options) - case bsontype.DBPointer: - var ns string - var pointer primitive.ObjectID - ns, pointer, err = src.ReadDBPointer() - if err != nil { - break - } - err = dst.WriteDBPointer(ns, pointer) - case bsontype.JavaScript: - var js string - js, err = src.ReadJavascript() - if err != nil { - break - } - err = dst.WriteJavascript(js) - case bsontype.Symbol: - var symbol string - symbol, err = src.ReadSymbol() - if err != nil { - break - } - err = dst.WriteSymbol(symbol) - case bsontype.CodeWithScope: - var code string - var srcScope DocumentReader - code, srcScope, err = src.ReadCodeWithScope() - if err != nil { - break - } - - var dstScope DocumentWriter - dstScope, err = dst.WriteCodeWithScope(code) - if err != nil { - break - } - err = c.copyDocumentCore(dstScope, srcScope) - case bsontype.Int32: - var i32 int32 - i32, err = src.ReadInt32() - if err != nil { - break - } - err = dst.WriteInt32(i32) - case bsontype.Timestamp: - var t, i uint32 - t, i, err = src.ReadTimestamp() - if err != nil { - break - } - err = dst.WriteTimestamp(t, i) - case bsontype.Int64: - var i64 int64 - i64, err = src.ReadInt64() - if err != nil { - break - } - err = dst.WriteInt64(i64) - case bsontype.Decimal128: - var d128 primitive.Decimal128 - d128, err = src.ReadDecimal128() - if err != nil { - break - } - err = dst.WriteDecimal128(d128) - case bsontype.MinKey: - err = src.ReadMinKey() - if err != nil { - break - } - err = dst.WriteMinKey() - case bsontype.MaxKey: - err = src.ReadMaxKey() - if err != nil { - break - } - err = dst.WriteMaxKey() - default: - err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) - } - - return err -} - -func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { - ar, err := src.ReadArray() - if err != nil { - return err - } - - aw, err := dst.WriteArray() - if err != nil { - return err - } - - for { - vr, err := ar.ReadValue() - if errors.Is(err, ErrEOA) { - break - } - if err != nil { - return err - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } 
- } - - return aw.WriteArrayEnd() -} - -func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, ErrEOD) { - break - } - if err != nil { - return err - } - - vw, err := dw.WriteDocumentElement(key) - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go deleted file mode 100644 index 750b0d2a..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonrw contains abstractions for reading and writing -// BSON and BSON like types from sources. -package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go deleted file mode 100644 index bb52a0ec..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -const maxNestingDepth = 200 - -// ErrInvalidJSON indicates the JSON input is invalid -var ErrInvalidJSON = errors.New("invalid JSON input") - -type jsonParseState byte - -const ( - jpsStartState jsonParseState = iota - jpsSawBeginObject - jpsSawEndObject - jpsSawBeginArray - jpsSawEndArray - jpsSawColon - jpsSawComma - jpsSawKey - jpsSawValue - jpsDoneState - jpsInvalidState -) - -type jsonParseMode byte - -const ( - jpmInvalidMode jsonParseMode = iota - jpmObjectMode - jpmArrayMode -) - -type extJSONValue struct { - t bsontype.Type - v interface{} -} - -type extJSONObject struct { - keys []string - values []*extJSONValue -} - -type extJSONParser struct { - js *jsonScanner - s jsonParseState - m []jsonParseMode - k string - v *extJSONValue - - err error - canonical bool - depth int - maxDepth int - - emptyObject bool - relaxedUUID bool -} - -// newExtJSONParser returns a new extended JSON parser, ready to to begin -// parsing from the first character of the argued json input. It will not -// perform any read-ahead and will therefore not report any errors about -// malformed JSON at this point. 
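The extended JSON parser deleted below includes the relaxed $uuid handling (rewriting a $uuid value into the canonical $binary form with subtype "04"). From the public API this surfaces through bson.UnmarshalExtJSON; a small sketch, with an arbitrary example UUID:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// Relaxed extended JSON with a $uuid wrapper; the parser converts it to
	// binary subtype 4 before decoding.
	in := []byte(`{"id": {"$uuid": "c8edabc3-f738-4ca3-b68d-ab92a91478a3"}}`)

	var out struct {
		ID primitive.Binary `bson:"id"`
	}
	if err := bson.UnmarshalExtJSON(in, false, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID.Subtype, len(out.ID.Data)) // 4 16
}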
-func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { - return &extJSONParser{ - js: &jsonScanner{r: r}, - s: jpsStartState, - m: []jsonParseMode{}, - canonical: canonical, - maxDepth: maxNestingDepth, - } -} - -// peekType examines the next value and returns its BSON Type -func (ejp *extJSONParser) peekType() (bsontype.Type, error) { - var t bsontype.Type - var err error - initialState := ejp.s - - ejp.advanceState() - switch ejp.s { - case jpsSawValue: - t = ejp.v.t - case jpsSawBeginArray: - t = bsontype.Array - case jpsInvalidState: - err = ejp.err - case jpsSawComma: - // in array mode, seeing a comma means we need to progress again to actually observe a type - if ejp.peekMode() == jpmArrayMode { - return ejp.peekType() - } - case jpsSawEndArray: - // this would only be a valid state if we were in array mode, so return end-of-array error - err = ErrEOA - case jpsSawBeginObject: - // peek key to determine type - ejp.advanceState() - switch ejp.s { - case jpsSawEndObject: // empty embedded document - t = bsontype.EmbeddedDocument - ejp.emptyObject = true - case jpsInvalidState: - err = ejp.err - case jpsSawKey: - if initialState == jpsStartState { - return bsontype.EmbeddedDocument, nil - } - t = wrapperKeyBSONType(ejp.k) - - // if $uuid is encountered, parse as binary subtype 4 - if ejp.k == "$uuid" { - ejp.relaxedUUID = true - t = bsontype.Binary - } - - switch t { - case bsontype.JavaScript: - // just saw $code, need to check for $scope at same level - _, err = ejp.readValue(bsontype.JavaScript) - if err != nil { - break - } - - switch ejp.s { - case jpsSawEndObject: // type is TypeJavaScript - case jpsSawComma: - ejp.advanceState() - - if ejp.s == jpsSawKey && ejp.k == "$scope" { - t = bsontype.CodeWithScope - } else { - err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) - } - case jpsInvalidState: - err = ejp.err - default: - err = ErrInvalidJSON - } - case bsontype.CodeWithScope: - err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope") - } - } - } - - return t, err -} - -// readKey parses the next key and its type and returns them -func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { - if ejp.emptyObject { - ejp.emptyObject = false - return "", 0, ErrEOD - } - - // advance to key (or return with error) - switch ejp.s { - case jpsStartState: - ejp.advanceState() - if ejp.s == jpsSawBeginObject { - ejp.advanceState() - } - case jpsSawBeginObject: - ejp.advanceState() - case jpsSawValue, jpsSawEndObject, jpsSawEndArray: - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject, jpsSawComma: - ejp.advanceState() - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsDoneState: - return "", 0, io.EOF - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, ErrInvalidJSON - } - case jpsSawKey: // do nothing (key was peeked before) - default: - return "", 0, invalidRequestError("key") - } - - // read key - var key string - - switch ejp.s { - case jpsSawKey: - key = ejp.k - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, invalidRequestError("key") - } - - // check for colon - ejp.advanceState() - if err := ensureColon(ejp.s, key); err != nil { - return "", 0, err - } - - // peek at the value to determine type - t, err := ejp.peekType() - if err != nil { - return "", 0, err - } - - return key, t, nil -} - -// readValue returns the value corresponding to the Type returned by peekType 
-func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { - if ejp.s == jpsInvalidState { - return nil, ejp.err - } - - var v *extJSONValue - - switch t { - case bsontype.Null, bsontype.Boolean, bsontype.String: - if ejp.s != jpsSawValue { - return nil, invalidRequestError(t.String()) - } - v = ejp.v - case bsontype.Int32, bsontype.Int64, bsontype.Double: - // relaxed version allows these to be literal number values - if ejp.s == jpsSawValue { - v = ejp.v - break - } - fallthrough - case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { - return nil, invalidJSONErrorForType("value", t) - } - - v = ejp.v - - // read end object - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("} after value", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: - if ejp.s != jpsSawKey { - return nil, invalidRequestError(t.String()) - } - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - if t == bsontype.Binary && ejp.s == jpsSawValue { - // convert relaxed $uuid format - if ejp.relaxedUUID { - defer func() { ejp.relaxedUUID = false }() - uuid, err := ejp.v.parseSymbol() - if err != nil { - return nil, err - } - - // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing - // in the 8th, 13th, 18th, and 23rd characters. 
- // - // See https://tools.ietf.org/html/rfc4122#section-3 - valid := len(uuid) == 36 && - string(uuid[8]) == "-" && - string(uuid[13]) == "-" && - string(uuid[18]) == "-" && - string(uuid[23]) == "-" - if !valid { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // remove hyphens - uuidNoHyphens := strings.Replace(uuid, "-", "", -1) - if len(uuidNoHyphens) != 32 { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // convert hex to bytes - bytes, err := hex.DecodeString(uuidNoHyphens) - if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) - } - - base64 := &extJSONValue{ - t: bsontype.String, - v: base64.StdEncoding.EncodeToString(bytes), - } - subType := &extJSONValue{ - t: bsontype.String, - v: "04", - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - - break - } - - // convert legacy $binary format - base64 := ejp.v - - ejp.advanceState() - if ejp.s != jpsSawComma { - return nil, invalidJSONErrorForType(",", bsontype.Binary) - } - - ejp.advanceState() - key, t, err := ejp.readKey() - if err != nil { - return nil, err - } - if key != "$type" { - return nil, invalidJSONErrorForType("$type", bsontype.Binary) - } - - subType, err := ejp.readValue(t) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - break - } - - // read KV pairs - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONErrorForType("{", t) - } - - keys, vals, err := ejp.readObject(2, true) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) - } - - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - - case bsontype.DateTime: - switch ejp.s { - case jpsSawValue: - v = ejp.v - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject: - keys, vals, err := ejp.readObject(1, true) - if err != nil { - return nil, err - } - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - case jpsSawValue: - if ejp.canonical { - return nil, invalidJSONError("{") - } - v = ejp.v - default: - if ejp.canonical { - return nil, invalidJSONErrorForType("object", t) - } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("value and then }", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.JavaScript: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != 
jpsSawValue { - return nil, invalidJSONErrorForType("value", t) - } - v = ejp.v - - // read end object or comma and just return - ejp.advanceState() - case jpsSawEndObject: - v = ejp.v - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.CodeWithScope: - if ejp.s == jpsSawKey && ejp.k == "$scope" { - v = ejp.v // this is the $code string from earlier - - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONError("$scope to be embedded document") - } - } else { - return nil, invalidRequestError(t.String()) - } - case bsontype.EmbeddedDocument, bsontype.Array: - return nil, invalidRequestError(t.String()) - } - - return v, nil -} - -// readObject is a utility method for reading full objects of known (or expected) size -// it is useful for extended JSON types such as binary, datetime, regex, and timestamp -func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { - keys := make([]string, numKeys) - vals := make([]*extJSONValue, numKeys) - - if !started { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, nil, invalidJSONError("{") - } - } - - for i := 0; i < numKeys; i++ { - key, t, err := ejp.readKey() - if err != nil { - return nil, nil, err - } - - switch ejp.s { - case jpsSawKey: - v, err := ejp.readValue(t) - if err != nil { - return nil, nil, err - } - - keys[i] = key - vals[i] = v - case jpsSawValue: - keys[i] = key - vals[i] = ejp.v - default: - return nil, nil, invalidJSONError("value") - } - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, nil, invalidJSONError("}") - } - - return keys, vals, nil -} - -// advanceState reads the next JSON token from the scanner and transitions -// from the current state based on that token's type -func (ejp *extJSONParser) advanceState() { - if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { - return - } - - jt, err := ejp.js.nextToken() - - if err != nil { - ejp.err = err - ejp.s = jpsInvalidState - return - } - - valid := ejp.validateToken(jt.t) - if !valid { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - return - } - - switch jt.t { - case jttBeginObject: - ejp.s = jpsSawBeginObject - ejp.pushMode(jpmObjectMode) - ejp.depth++ - - if ejp.depth > ejp.maxDepth { - ejp.err = nestingDepthError(jt.p, ejp.depth) - ejp.s = jpsInvalidState - } - case jttEndObject: - ejp.s = jpsSawEndObject - ejp.depth-- - - if ejp.popMode() != jpmObjectMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttBeginArray: - ejp.s = jpsSawBeginArray - ejp.pushMode(jpmArrayMode) - case jttEndArray: - ejp.s = jpsSawEndArray - - if ejp.popMode() != jpmArrayMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttColon: - ejp.s = jpsSawColon - case jttComma: - ejp.s = jpsSawComma - case jttEOF: - ejp.s = jpsDoneState - if len(ejp.m) != 0 { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttString: - switch ejp.s { - case jpsSawComma: - if ejp.peekMode() == jpmArrayMode { - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - return - } - fallthrough - case jpsSawBeginObject: - ejp.s = jpsSawKey - ejp.k = jt.v.(string) - return - } - fallthrough - default: - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - } -} - -var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ - jpsStartState: { - jttBeginObject: 
true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - jttEOF: true, - }, - jpsSawBeginObject: { - jttEndObject: true, - jttString: true, - }, - jpsSawEndObject: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawBeginArray: { - jttBeginObject: true, - jttBeginArray: true, - jttEndArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawEndArray: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawColon: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawComma: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawKey: { - jttColon: true, - }, - jpsSawValue: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsDoneState: {}, - jpsInvalidState: {}, -} - -func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { - switch ejp.s { - case jpsSawEndObject: - // if we are at depth zero and the next token is a '{', - // we can consider it valid only if we are not in array mode. - if jtt == jttBeginObject && ejp.depth == 0 { - return ejp.peekMode() != jpmArrayMode - } - case jpsSawComma: - switch ejp.peekMode() { - // the only valid next token after a comma inside a document is a string (a key) - case jpmObjectMode: - return jtt == jttString - case jpmInvalidMode: - return false - } - } - - _, ok := jpsValidTransitionTokens[ejp.s][jtt] - return ok -} - -// ensureExtValueType returns true if the current value has the expected -// value type for single-key extended JSON types. 
For example, -// {"$numberInt": v} v must be TypeString -func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { - switch t { - case bsontype.MinKey, bsontype.MaxKey: - return ejp.v.t == bsontype.Int32 - case bsontype.Undefined: - return ejp.v.t == bsontype.Boolean - case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: - return ejp.v.t == bsontype.String - default: - return false - } -} - -func (ejp *extJSONParser) pushMode(m jsonParseMode) { - ejp.m = append(ejp.m, m) -} - -func (ejp *extJSONParser) popMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - m := ejp.m[l-1] - ejp.m = ejp.m[:l-1] - - return m -} - -func (ejp *extJSONParser) peekMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - return ejp.m[l-1] -} - -func extendJSONToken(jt *jsonToken) *extJSONValue { - var t bsontype.Type - - switch jt.t { - case jttInt32: - t = bsontype.Int32 - case jttInt64: - t = bsontype.Int64 - case jttDouble: - t = bsontype.Double - case jttString: - t = bsontype.String - case jttBool: - t = bsontype.Boolean - case jttNull: - t = bsontype.Null - default: - return nil - } - - return &extJSONValue{t: t, v: jt.v} -} - -func ensureColon(s jsonParseState, key string) error { - if s != jpsSawColon { - return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) - } - - return nil -} - -func invalidRequestError(s string) error { - return fmt.Errorf("invalid request to read %s", s) -} - -func invalidJSONError(expected string) error { - return fmt.Errorf("invalid JSON input; expected %s", expected) -} - -func invalidJSONErrorForType(expected string, t bsontype.Type) error { - return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) -} - -func unexpectedTokenError(jt *jsonToken) error { - switch jt.t { - case jttInt32, jttInt64, jttDouble: - return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) - case jttString: - return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) - case jttBool: - return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) - case jttNull: - return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) - case jttEOF: - return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) - default: - return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) - } -} - -func nestingDepthError(p, depth int) error { - return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go deleted file mode 100644 index 59ddfc44..00000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "errors" - "fmt" - "io" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. 
-// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -type ExtJSONValueReaderPool struct { - pool sync.Pool -} - -// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { - return &ExtJSONValueReaderPool{ - pool: sync.Pool{ - New: func() interface{} { - return new(extJSONValueReader) - }, - }, - } -} - -// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { - vr := bvrp.pool.Get().(*extJSONValueReader) - return vr.reset(r, canonical) -} - -// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing -// is inserted into the pool and ok will be false. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { - bvr, ok := vr.(*extJSONValueReader) - if !ok { - return false - } - - bvr, _ = bvr.reset(nil, false) - bvrp.pool.Put(bvr) - return true -} - -type ejvrState struct { - mode mode - vType bsontype.Type - depth int -} - -// extJSONValueReader is for reading extended JSON. -type extJSONValueReader struct { - p *extJSONParser - - stack []ejvrState - frame int -} - -// NewExtJSONValueReader creates a new ValueReader from a given io.Reader -// It will interpret the JSON of r as canonical or relaxed according to the -// given canonical flag -func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { - return newExtJSONValueReader(r, canonical) -} - -func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { - ejvr := new(extJSONValueReader) - return ejvr.reset(r, canonical) -} - -func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { - p := newExtJSONParser(r, canonical) - typ, err := p.peekType() - - if err != nil { - return nil, ErrInvalidJSON - } - - var m mode - switch typ { - case bsontype.EmbeddedDocument: - m = mTopLevel - case bsontype.Array: - m = mArray - default: - m = mValue - } - - stack := make([]ejvrState, 1, 5) - stack[0] = ejvrState{ - mode: m, - vType: typ, - } - return &extJSONValueReader{ - p: p, - stack: stack, - }, nil -} - -func (ejvr *extJSONValueReader) advanceFrame() { - if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack - length := len(ejvr.stack) - if length+1 >= cap(ejvr.stack) { - // double it - buf := make([]ejvrState, 2*cap(ejvr.stack)+1) - copy(buf, ejvr.stack) - ejvr.stack = buf - } - ejvr.stack = ejvr.stack[:length+1] - } - ejvr.frame++ - - // Clean the stack - ejvr.stack[ejvr.frame].mode = 0 - ejvr.stack[ejvr.frame].vType = 0 - ejvr.stack[ejvr.frame].depth = 0 -} - -func (ejvr *extJSONValueReader) pushDocument() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mDocument - ejvr.stack[ejvr.frame].depth = ejvr.p.depth -} - -func (ejvr *extJSONValueReader) pushCodeWithScope() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mCodeWithScope -} - -func (ejvr *extJSONValueReader) pushArray() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mArray -} - -func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = m - ejvr.stack[ejvr.frame].vType = t -} - -func 
(ejvr *extJSONValueReader) pop() { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - ejvr.frame-- - case mDocument, mArray, mCodeWithScope: - ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... - } -} - -func (ejvr *extJSONValueReader) skipObject() { - // read entire object until depth returns to 0 (last ending } or ] seen) - depth := 1 - for depth > 0 { - ejvr.p.advanceState() - - // If object is empty, raise depth and continue. When emptyObject is true, the - // parser has already read both the opening and closing brackets of an empty - // object ("{}"), so the next valid token will be part of the parent document, - // not part of the nested document. - // - // If there is a comma, there are remaining fields, emptyObject must be set back - // to false, and comma must be skipped with advanceState(). - if ejvr.p.emptyObject { - if ejvr.p.s == jpsSawComma { - ejvr.p.emptyObject = false - ejvr.p.advanceState() - } - depth-- - continue - } - - switch ejvr.p.s { - case jpsSawBeginObject, jpsSawBeginArray: - depth++ - case jpsSawEndObject, jpsSawEndArray: - depth-- - } - } -} - -func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { - te := TransitionError{ - name: name, - current: ejvr.stack[ejvr.frame].mode, - destination: destination, - modes: modes, - action: "read", - } - if ejvr.frame != 0 { - te.parent = ejvr.stack[ejvr.frame-1].mode - } - return te -} - -func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { - return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) -} - -func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != t { - return ejvr.typeError(t) - } - default: - modes := []mode{mElement, mValue} - if addModes != nil { - modes = append(modes, addModes...) 
- } - return ejvr.invalidTransitionErr(destination, callerName, modes) - } - - return nil -} - -func (ejvr *extJSONValueReader) Type() bsontype.Type { - return ejvr.stack[ejvr.frame].vType -} - -func (ejvr *extJSONValueReader) Skip() error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - default: - return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) - } - - defer ejvr.pop() - - t := ejvr.stack[ejvr.frame].vType - switch t { - case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: - // read entire array, doc or CodeWithScope - ejvr.skipObject() - default: - _, err := ejvr.p.readValue(t) - if err != nil { - return err - } - } - - return nil -} - -func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: // allow reading array from top level - case mArray: - return ejvr, nil - default: - if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { - return nil, err - } - } - - ejvr.pushArray() - - return ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { - if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { - return nil, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Binary) - if err != nil { - return nil, 0, err - } - - b, btype, err = v.parseBinary() - - ejvr.pop() - return b, btype, err -} - -func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { - if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { - return false, err - } - - v, err := ejvr.p.readValue(bsontype.Boolean) - if err != nil { - return false, err - } - - if v.t != bsontype.Boolean { - return false, fmt.Errorf("expected type bool, but got type %s", v.t) - } - - ejvr.pop() - return v.v.(bool), nil -} - -func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: - return ejvr, nil - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { - return nil, ejvr.typeError(bsontype.EmbeddedDocument) - } - - ejvr.pushDocument() - return ejvr, nil - default: - return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) - } -} - -func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { - if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { - return "", nil, err - } - - v, err := ejvr.p.readValue(bsontype.CodeWithScope) - if err != nil { - return "", nil, err - } - - code, err = v.parseJavascript() - - ejvr.pushCodeWithScope() - return code, ejvr, err -} - -func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { - if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { - return "", primitive.NilObjectID, err - } - - v, err := ejvr.p.readValue(bsontype.DBPointer) - if err != nil { - return "", primitive.NilObjectID, err - } - - ns, oid, err = v.parseDBPointer() - - ejvr.pop() - return ns, oid, err -} - -func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.DateTime) - if err != nil { - return 0, err - } - - d, err := v.parseDateTime() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) 
-func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
-	if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
-		return primitive.Decimal128{}, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Decimal128)
-	if err != nil {
-		return primitive.Decimal128{}, err
-	}
-
-	d, err := v.parseDecimal128()
-
-	ejvr.pop()
-	return d, err
-}
-
-func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
-	if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
-		return 0, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Double)
-	if err != nil {
-		return 0, err
-	}
-
-	d, err := v.parseDouble()
-
-	ejvr.pop()
-	return d, err
-}
-
-func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
-	if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
-		return 0, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Int32)
-	if err != nil {
-		return 0, err
-	}
-
-	i, err := v.parseInt32()
-
-	ejvr.pop()
-	return i, err
-}
-
-func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
-	if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
-		return 0, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Int64)
-	if err != nil {
-		return 0, err
-	}
-
-	i, err := v.parseInt64()
-
-	ejvr.pop()
-	return i, err
-}
-
-func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
-	if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
-		return "", err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.JavaScript)
-	if err != nil {
-		return "", err
-	}
-
-	code, err = v.parseJavascript()
-
-	ejvr.pop()
-	return code, err
-}
-
-func (ejvr *extJSONValueReader) ReadMaxKey() error {
-	if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
-		return err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.MaxKey)
-	if err != nil {
-		return err
-	}
-
-	err = v.parseMinMaxKey("max")
-
-	ejvr.pop()
-	return err
-}
-
-func (ejvr *extJSONValueReader) ReadMinKey() error {
-	if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
-		return err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.MinKey)
-	if err != nil {
-		return err
-	}
-
-	err = v.parseMinMaxKey("min")
-
-	ejvr.pop()
-	return err
-}
-
-func (ejvr *extJSONValueReader) ReadNull() error {
-	if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
-		return err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Null)
-	if err != nil {
-		return err
-	}
-
-	if v.t != bsontype.Null {
-		return fmt.Errorf("expected type null but got type %s", v.t)
-	}
-
-	ejvr.pop()
-	return nil
-}
-
-func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
-	if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
-		return primitive.ObjectID{}, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.ObjectID)
-	if err != nil {
-		return primitive.ObjectID{}, err
-	}
-
-	oid, err := v.parseObjectID()
-
-	ejvr.pop()
-	return oid, err
-}
-
-func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
-	if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
-		return "", "", err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Regex)
-	if err != nil {
-		return "", "", err
-	}
-
-	pattern, options, err = v.parseRegex()
-
-	ejvr.pop()
-	return pattern, options, err
-}
-
-func (ejvr *extJSONValueReader) ReadString() (string, error) {
-	if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
-		return "", err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.String)
-	if err != nil {
-		return "", err
-	}
-
-	if v.t != bsontype.String {
-		return "", fmt.Errorf("expected type string but got type %s", v.t)
-	}
-
-	ejvr.pop()
-	return v.v.(string), nil
-}
-
-func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) {
-	if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
-		return "", err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Symbol)
-	if err != nil {
-		return "", err
-	}
-
-	symbol, err = v.parseSymbol()
-
-	ejvr.pop()
-	return symbol, err
-}
-
-func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) {
-	if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
-		return 0, 0, err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Timestamp)
-	if err != nil {
-		return 0, 0, err
-	}
-
-	t, i, err = v.parseTimestamp()
-
-	ejvr.pop()
-	return t, i, err
-}
-
-func (ejvr *extJSONValueReader) ReadUndefined() error {
-	if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
-		return err
-	}
-
-	v, err := ejvr.p.readValue(bsontype.Undefined)
-	if err != nil {
-		return err
-	}
-
-	err = v.parseUndefined()
-
-	ejvr.pop()
-	return err
-}
-
-func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) {
-	switch ejvr.stack[ejvr.frame].mode {
-	case mTopLevel, mDocument, mCodeWithScope:
-	default:
-		return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
-	}
-
-	name, t, err := ejvr.p.readKey()
-
-	if err != nil {
-		if errors.Is(err, ErrEOD) {
-			if ejvr.stack[ejvr.frame].mode == mCodeWithScope {
-				_, err := ejvr.p.peekType()
-				if err != nil {
-					return "", nil, err
-				}
-			}
-
-			ejvr.pop()
-		}
-
-		return "", nil, err
-	}
-
-	ejvr.push(mElement, t)
-	return name, ejvr, nil
-}
-
-func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
-	switch ejvr.stack[ejvr.frame].mode {
-	case mArray:
-	default:
-		return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
-	}
-
-	t, err := ejvr.p.peekType()
-	if err != nil {
-		if errors.Is(err, ErrEOA) {
-			ejvr.pop()
-		}
-
-		return nil, err
-	}
-
-	ejvr.push(mValue, t)
-	return ejvr, nil
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
deleted file mode 100644
index ba39c960..00000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-//
-// Based on github.com/golang/go by The Go Authors
-// See THIRD-PARTY-NOTICES for original license terms.
-
-package bsonrw
-
-import "unicode/utf8"
-
-// safeSet holds the value true if the ASCII character with the given array
-// position can be represented inside a JSON string without any further
-// escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), and the backslash character ("\").
-var safeSet = [utf8.RuneSelf]bool{
-	' ':      true,
-	'!':      true,
-	'"':      false,
-	'#':      true,
-	'$':      true,
-	'%':      true,
-	'&':      true,
-	'\'':     true,
-	'(':      true,
-	')':      true,
-	'*':      true,
-	'+':      true,
-	',':      true,
-	'-':      true,
-	'.':      true,
-	'/':      true,
-	'0':      true,
-	'1':      true,
-	'2':      true,
-	'3':      true,
-	'4':      true,
-	'5':      true,
-	'6':      true,
-	'7':      true,
-	'8':      true,
-	'9':      true,
-	':':      true,
-	';':      true,
-	'<':      true,
-	'=':      true,
-	'>':      true,
-	'?':      true,
-	'@':      true,
-	'A':      true,
-	'B':      true,
-	'C':      true,
-	'D':      true,
-	'E':      true,
-	'F':      true,
-	'G':      true,
-	'H':      true,
-	'I':      true,
-	'J':      true,
-	'K':      true,
-	'L':      true,
-	'M':      true,
-	'N':      true,
-	'O':      true,
-	'P':      true,
-	'Q':      true,
-	'R':      true,
-	'S':      true,
-	'T':      true,
-	'U':      true,
-	'V':      true,
-	'W':      true,
-	'X':      true,
-	'Y':      true,
-	'Z':      true,
-	'[':      true,
-	'\\':     false,
-	']':      true,
-	'^':      true,
-	'_':      true,
-	'`':      true,
-	'a':      true,
-	'b':      true,
-	'c':      true,
-	'd':      true,
-	'e':      true,
-	'f':      true,
-	'g':      true,
-	'h':      true,
-	'i':      true,
-	'j':      true,
-	'k':      true,
-	'l':      true,
-	'm':      true,
-	'n':      true,
-	'o':      true,
-	'p':      true,
-	'q':      true,
-	'r':      true,
-	's':      true,
-	't':      true,
-	'u':      true,
-	'v':      true,
-	'w':      true,
-	'x':      true,
-	'y':      true,
-	'z':      true,
-	'{':      true,
-	'|':      true,
-	'}':      true,
-	'~':      true,
-	'\u007f': true,
-}
-
-// htmlSafeSet holds the value true if the ASCII character with the given
-// array position can be safely represented inside a JSON string, embedded
-// inside of HTML
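For context on how a lookup table like safeSet is typically consulted: a string encoder takes the fast path for bytes marked true and escapes everything else. The following is a minimal sketch under that assumption; writeJSONString is an illustrative simplification, not the driver's actual encoder (which also decodes multi-byte runes and supports an HTML-safe mode).

```go
package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// safeSet mirrors the deleted table above: every printable ASCII byte is
// safe except '"' and '\\'; control characters (< 0x20) stay false.
var safeSet = func() (s [utf8.RuneSelf]bool) {
	for c := byte(' '); c < utf8.RuneSelf; c++ {
		s[c] = c != '"' && c != '\\'
	}
	return
}()

// writeJSONString is a simplified sketch: safe bytes are copied verbatim,
// everything else is \u-escaped byte by byte (a real encoder would decode
// full UTF-8 runes on the slow path and use shorter escapes like \").
func writeJSONString(sb *strings.Builder, s string) {
	sb.WriteByte('"')
	for i := 0; i < len(s); i++ {
		if b := s[i]; b < utf8.RuneSelf && safeSet[b] {
			sb.WriteByte(b) // fast path: no escaping needed
			continue
		}
		fmt.Fprintf(sb, `\u%04x`, s[i])
	}
	sb.WriteByte('"')
}

func main() {
	var sb strings.Builder
	writeJSONString(&sb, "tab\there \"quoted\"")
	fmt.Println(sb.String()) // "tab\u0009here \u0022quoted\u0022"
}
```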