From 85513694701b6cd7b03ef613e27a310c2a8bb71b Mon Sep 17 00:00:00 2001
From: Bob Du
Date: Thu, 13 Nov 2025 19:39:16 +0800
Subject: [PATCH] Use dedicated moby api package

Signed-off-by: Bob Du
---
 go.mod | 14 +- go.sum | 88 +- pkg/v1/daemon/image.go | 2 +- pkg/v1/daemon/image_test.go | 6 +- pkg/v1/daemon/options.go | 6 +- pkg/v1/daemon/write.go | 2 +- pkg/v1/daemon/write_test.go | 4 +- vendor/github.com/docker/docker/AUTHORS | 2496 --- vendor/github.com/docker/docker/NOTICE | 19 - vendor/github.com/docker/docker/api/README.md | 42 - vendor/github.com/docker/docker/api/common.go | 20 - .../docker/docker/api/swagger-gen.yaml | 12 - .../github.com/docker/docker/api/swagger.yaml | 13431 ---------------- .../docker/api/types/build/disk_usage.go | 8 - .../docker/api/types/checkpoint/options.go | 19 - .../docker/docker/api/types/client.go | 85 - .../docker/api/types/container/disk_usage.go | 8 - .../docker/docker/api/types/container/exec.go | 51 - .../api/types/container/network_settings.go | 56 - .../docker/api/types/container/options.go | 67 - .../docker/docker/api/types/filters/errors.go | 24 - .../docker/docker/api/types/filters/parse.go | 336 - .../docker/api/types/image/disk_usage.go | 8 - .../docker/docker/api/types/image/image.go | 47 - .../docker/docker/api/types/image/opts.go | 122 - .../docker/api/types/network/endpoint.go | 153 - .../docker/docker/api/types/network/ipam.go | 134 - .../docker/api/types/network/network.go | 168 - .../docker/api/types/plugin_interface_type.go | 21 - .../docker/api/types/plugin_responses.go | 71 - .../docker/api/types/registry/authconfig.go | 109 - .../docker/api/types/registry/authenticate.go | 21 - .../docker/api/types/registry/registry.go | 122 - .../docker/api/types/strslice/strslice.go | 30 - .../docker/api/types/swarm/runtime/gen.go | 3 - .../api/types/swarm/runtime/plugin.pb.go | 808 - .../api/types/swarm/runtime/plugin.proto | 19 - .../docker/api/types/system/disk_usage.go | 17 - .../docker/api/types/system/security_opts.go | 48 - .../docker/docker/api/types/types.go | 103 - .../docker/api/types/types_deprecated.go | 241 - .../docker/api/types/volume/disk_usage.go | 8 - .../docker/docker/api/types/volume/options.go | 15 - .../docker/api/types/volume/volume_update.go | 7 - .../github.com/docker/docker/client/README.md | 38 - .../docker/docker/client/build_cancel.go | 16 - .../docker/docker/client/build_prune.go | 56 - .../docker/docker/client/checkpoint.go | 18 - .../docker/docker/client/checkpoint_create.go | 19 - .../docker/docker/client/checkpoint_delete.go | 25 - .../docker/docker/client/checkpoint_list.go | 28 - .../docker/docker/client/client_deprecated.go | 27 - .../docker/docker/client/client_interfaces.go | 237 - .../docker/docker/client/client_unix.go | 7 - .../docker/docker/client/client_windows.go | 5 - .../docker/docker/client/config_create.go | 24 - .../docker/docker/client/config_inspect.go | 37 - .../docker/docker/client/config_list.go | 37 - .../docker/docker/client/config_remove.go | 17 - .../docker/docker/client/config_update.go | 24 - .../docker/docker/client/container_attach.go | 65 - .../docker/docker/client/container_create.go | 168 - .../docker/docker/client/container_exec.go | 81 - .../docker/docker/client/container_export.go | 24 - .../docker/docker/client/container_inspect.go | 57 - .../docker/docker/client/container_kill.go | 23 - .../docker/docker/client/container_logs.go | 85 - .../docker/docker/client/container_pause.go | 15 - .../docker/docker/client/container_prune.go | 35 -
.../docker/docker/client/container_rename.go | 20 - .../docker/docker/client/container_resize.go | 38 - .../docker/docker/client/container_restart.go | 41 - .../docker/docker/client/container_start.go | 28 - .../docker/docker/client/container_stats.go | 56 - .../docker/docker/client/container_stop.go | 45 - .../docker/docker/client/container_top.go | 33 - .../docker/docker/client/container_unpause.go | 15 - .../docker/docker/client/container_update.go | 26 - .../docker/docker/client/container_wait.go | 122 - .../docker/docker/client/disk_usage.go | 33 - .../docker/client/distribution_inspect.go | 39 - .../github.com/docker/docker/client/events.go | 100 - .../docker/docker/client/image_create.go | 40 - .../docker/docker/client/image_import.go | 48 - .../docker/docker/client/image_inspect.go | 76 - .../docker/docker/client/image_list.go | 67 - .../docker/docker/client/image_load.go | 54 - .../docker/docker/client/image_prune.go | 35 - .../docker/docker/client/image_pull.go | 64 - .../docker/docker/client/image_push.go | 72 - .../docker/docker/client/image_tag.go | 37 - .../github.com/docker/docker/client/info.go | 26 - .../github.com/docker/docker/client/login.go | 24 - .../docker/docker/client/network_connect.go | 28 - .../docker/docker/client/network_create.go | 40 - .../docker/client/network_disconnect.go | 28 - .../docker/docker/client/network_inspect.go | 47 - .../docker/docker/client/network_list.go | 32 - .../docker/docker/client/network_prune.go | 35 - .../docker/docker/client/network_remove.go | 14 - .../docker/docker/client/node_inspect.go | 33 - .../docker/docker/client/node_list.go | 34 - .../docker/docker/client/node_update.go | 22 - .../github.com/docker/docker/client/ping.go | 81 - .../docker/docker/client/plugin_create.go | 23 - .../docker/docker/client/plugin_disable.go | 23 - .../docker/docker/client/plugin_enable.go | 23 - .../docker/docker/client/plugin_inspect.go | 32 - .../docker/docker/client/plugin_install.go | 117 - .../docker/docker/client/plugin_list.go | 33 - .../docker/docker/client/plugin_push.go | 24 - .../docker/docker/client/plugin_remove.go | 25 - .../docker/docker/client/plugin_set.go | 17 - .../docker/docker/client/plugin_upgrade.go | 47 - .../docker/docker/client/secret_create.go | 24 - .../docker/docker/client/secret_inspect.go | 37 - .../docker/docker/client/secret_list.go | 37 - .../docker/docker/client/secret_remove.go | 17 - .../docker/docker/client/secret_update.go | 24 - .../docker/docker/client/service_inspect.go | 38 - .../docker/docker/client/service_list.go | 38 - .../docker/docker/client/service_logs.go | 57 - .../docker/docker/client/service_remove.go | 15 - .../docker/docker/client/service_update.go | 89 - .../docker/client/swarm_get_unlock_key.go | 21 - .../docker/docker/client/swarm_init.go | 21 - .../docker/docker/client/swarm_inspect.go | 21 - .../docker/docker/client/swarm_join.go | 14 - .../docker/docker/client/swarm_leave.go | 17 - .../docker/docker/client/swarm_unlock.go | 14 - .../docker/docker/client/swarm_update.go | 21 - .../docker/docker/client/task_inspect.go | 34 - .../docker/docker/client/task_list.go | 34 - .../docker/docker/client/task_logs.go | 51 - .../docker/docker/client/version.go | 21 - .../docker/docker/client/volume_create.go | 21 - .../docker/docker/client/volume_inspect.go | 40 - .../docker/docker/client/volume_list.go | 33 - .../docker/docker/client/volume_prune.go | 35 - .../docker/docker/client/volume_remove.go | 34 - .../docker/docker/client/volume_update.go | 28 - .../docker/internal/lazyregexp/lazyregexp.go 
| 90 - .../docker/internal/multierror/multierror.go | 46 - .../docker/go-connections/nat/nat.go | 240 - .../docker/go-connections/nat/parse.go | 33 - .../docker/go-connections/nat/sort.go | 96 - .../docker/go-connections/sockets/README.md | 0 .../docker/go-connections/sockets/proxy.go | 9 +- .../docker/go-connections/sockets/sockets.go | 31 +- .../go-connections/sockets/sockets_unix.go | 23 +- .../go-connections/sockets/sockets_windows.go | 7 +- .../go-connections/sockets/unix_socket.go | 44 +- .../sockets/unix_socket_unix.go | 54 + .../sockets/unix_socket_windows.go | 7 + .../docker/go-connections/tlsconfig/config.go | 100 +- .../tlsconfig/config_client_ciphers.go | 14 - vendor/github.com/gogo/protobuf/AUTHORS | 15 - vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 - vendor/github.com/gogo/protobuf/LICENSE | 35 - .../github.com/gogo/protobuf/proto/Makefile | 43 - .../github.com/gogo/protobuf/proto/clone.go | 258 - .../gogo/protobuf/proto/custom_gogo.go | 39 - .../github.com/gogo/protobuf/proto/decode.go | 427 - .../gogo/protobuf/proto/deprecated.go | 63 - .../github.com/gogo/protobuf/proto/discard.go | 350 - .../gogo/protobuf/proto/duration.go | 100 - .../gogo/protobuf/proto/duration_gogo.go | 49 - .../github.com/gogo/protobuf/proto/encode.go | 205 - .../gogo/protobuf/proto/encode_gogo.go | 33 - .../github.com/gogo/protobuf/proto/equal.go | 300 - .../gogo/protobuf/proto/extensions.go | 605 - .../gogo/protobuf/proto/extensions_gogo.go | 389 - vendor/github.com/gogo/protobuf/proto/lib.go | 973 -- .../gogo/protobuf/proto/lib_gogo.go | 50 - .../gogo/protobuf/proto/message_set.go | 181 - .../gogo/protobuf/proto/pointer_reflect.go | 357 - .../protobuf/proto/pointer_reflect_gogo.go | 59 - .../gogo/protobuf/proto/pointer_unsafe.go | 308 - .../protobuf/proto/pointer_unsafe_gogo.go | 56 - .../gogo/protobuf/proto/properties.go | 610 - .../gogo/protobuf/proto/properties_gogo.go | 36 - .../gogo/protobuf/proto/skip_gogo.go | 119 - .../gogo/protobuf/proto/table_marshal.go | 3009 ---- .../gogo/protobuf/proto/table_marshal_gogo.go | 388 - .../gogo/protobuf/proto/table_merge.go | 676 - .../gogo/protobuf/proto/table_unmarshal.go | 2249 --- .../protobuf/proto/table_unmarshal_gogo.go | 385 - vendor/github.com/gogo/protobuf/proto/text.go | 930 -- .../gogo/protobuf/proto/text_gogo.go | 57 - .../gogo/protobuf/proto/text_parser.go | 1018 -- .../gogo/protobuf/proto/timestamp.go | 113 - .../gogo/protobuf/proto/timestamp_gogo.go | 49 - .../gogo/protobuf/proto/wrappers.go | 1888 --- .../gogo/protobuf/proto/wrappers_gogo.go | 113 - .../{docker/docker => moby/moby/api}/LICENSE | 0 .../moby}/api/types/blkiodev/blkio.go | 2 +- .../moby/moby/api/types/build/build.go | 16 + .../moby}/api/types/build/cache.go | 17 - .../moby/moby/api/types/build/disk_usage.go | 36 + .../api/types/checkpoint/create_request.go | 8 + .../moby}/api/types/checkpoint/list.go | 0 .../moby/api/types/common}/error_response.go | 6 +- .../api/types/common}/error_response_ext.go | 2 +- .../moby}/api/types/common/id_response.go | 3 + .../moby}/api/types/container/change_type.go | 2 + .../moby}/api/types/container/change_types.go | 0 .../moby}/api/types/container/commit.go | 2 +- .../moby}/api/types/container/config.go | 43 +- .../moby}/api/types/container/container.go | 67 +- .../api/types/container/create_request.go | 2 +- .../api/types/container/create_response.go | 7 +- .../moby/api/types/container/disk_usage.go | 36 + .../moby}/api/types/container/errors.go | 0 .../moby/moby/api/types/container/exec.go | 35 + 
.../types/container/exec_create_request.go | 17 + .../api/types/container/exec_start_request.go | 12 + .../api/types/container/filesystem_change.go | 2 + .../moby}/api/types/container/health.go | 15 +- .../moby}/api/types/container/hostconfig.go | 36 +- .../api/types/container/hostconfig_unix.go | 4 +- .../api/types/container/hostconfig_windows.go | 4 +- .../api/types/container/network_settings.go | 22 + .../moby/api/types/container/port_summary.go} | 18 +- .../moby}/api/types/container/state.go | 42 +- .../moby}/api/types/container/stats.go | 85 +- .../moby}/api/types/container/top_response.go | 5 + .../api/types/container/update_response.go | 4 + .../api/types/container/wait_exit_error.go | 3 + .../api/types/container/wait_response.go | 5 +- .../api/types/container/waitcondition.go | 2 +- .../moby}/api/types/events/events.go | 16 +- .../moby}/api/types/image/delete_response.go | 3 + .../moby/moby/api/types/image/disk_usage.go | 36 + .../moby/moby/api/types/image/image.go | 18 + .../moby}/api/types/image/image_history.go | 7 +- .../moby}/api/types/image/image_inspect.go | 46 +- .../moby}/api/types/image/manifest.go | 0 .../moby}/api/types/image/summary.go | 6 - .../moby/api/types/jsonstream/json_error.go | 12 + .../moby/moby/api/types/jsonstream/message.go | 15 + .../moby/api/types/jsonstream/progress.go | 10 + .../moby}/api/types/mount/mount.go | 2 +- .../api/types/network/config_reference.go | 20 + .../moby/api/types/network/connect_request.go | 20 + .../api/types/network/create_response.go | 6 +- .../api/types/network/disconnect_request.go | 21 + .../moby/moby/api/types/network/endpoint.go | 74 + .../api/types/network/endpoint_resource.go | 35 + .../moby/moby/api/types/network/hwaddr.go | 37 + .../moby/moby/api/types/network/inspect.go | 27 + .../moby/moby/api/types/network/ipam.go | 22 + .../moby/api/types/network/ipam_status.go | 16 + .../moby/moby/api/types/network/network.go | 100 + .../moby/api/types/network/network_types.go | 43 + .../moby/moby/api/types/network/peer_info.go | 24 + .../moby/moby/api/types/network/port.go | 346 + .../moby/api/types/network/service_info.go | 28 + .../moby/moby/api/types/network/status.go | 15 + .../moby/api/types/network/subnet_status.go | 20 + .../moby/moby/api/types/network/summary.go | 13 + .../moby/moby/api/types/network/task.go | 28 + .../moby/moby/api/types/plugin/.gitignore | 1 + .../moby/moby/api/types/plugin/capability.go | 55 + .../moby/api/types/plugin/device.go} | 12 +- .../moby/api/types/plugin/env.go} | 11 +- .../moby/api/types/plugin/mount.go} | 17 +- .../moby/api/types/plugin}/plugin.go | 118 +- .../moby/api/types/plugin/plugin_responses.go | 33 + .../moby/api/types/registry/auth_response.go | 21 + .../moby/api/types/registry/authconfig.go | 35 + .../moby/moby/api/types/registry/registry.go | 67 + .../moby}/api/types/registry/search.go | 21 - .../moby}/api/types/storage/driver_data.go | 4 + .../api/types/storage/root_f_s_storage.go | 16 + .../storage/root_f_s_storage_snapshot.go | 15 + .../moby/moby/api/types/storage/storage.go | 16 + .../moby}/api/types/swarm/common.go | 2 +- .../moby}/api/types/swarm/config.go | 9 +- .../moby}/api/types/swarm/container.go | 9 +- .../moby}/api/types/swarm/network.go | 42 +- .../moby}/api/types/swarm/node.go | 15 +- .../moby}/api/types/swarm/runtime.go | 20 +- .../moby}/api/types/swarm/secret.go | 11 +- .../moby}/api/types/swarm/service.go | 100 +- .../types/swarm/service_create_response.go | 4 + .../types/swarm/service_update_response.go | 4 + .../moby}/api/types/swarm/swarm.go | 24 +- 
.../moby}/api/types/swarm/task.go | 23 +- .../moby/moby/api/types/system/disk_usage.go | 31 + .../moby}/api/types/system/info.go | 17 +- .../moby}/api/types/system/runtime.go | 4 +- .../moby/api/types/system/version_response.go | 58 + .../github.com/moby/moby/api/types/types.go | 18 + .../moby}/api/types/volume/cluster_volume.go | 2 +- .../moby/api/types/volume/create_request.go} | 15 +- .../moby/moby/api/types/volume/disk_usage.go | 36 + .../moby}/api/types/volume/list_response.go | 8 +- .../moby/api/types/volume/prune_report.go | 8 + .../moby}/api/types/volume/volume.go | 14 +- vendor/github.com/moby/moby/client/LICENSE | 191 + vendor/github.com/moby/moby/client/README.md | 43 + vendor/github.com/moby/moby/client/auth.go | 14 + .../moby/moby/client/build_cancel.go | 21 + .../moby/moby/client/build_prune.go | 67 + .../moby/moby/client/checkpoint_create.go | 36 + .../moby/moby/client/checkpoint_list.go | 38 + .../moby/moby/client/checkpoint_remove.go | 34 + .../docker => moby/moby}/client/client.go | 242 +- .../moby/moby/client/client_interfaces.go | 242 + .../moby/moby/client/client_unix.go | 18 + .../moby/moby/client/client_windows.go | 17 + .../moby/moby/client/config_create.go | 34 + .../moby/moby/client/config_inspect.go | 35 + .../moby/moby/client/config_list.go | 38 + .../moby/moby/client/config_remove.go | 25 + .../moby/moby/client/config_update.go | 32 + .../moby/moby/client/container_attach.go | 86 + .../moby}/client/container_commit.go | 35 +- .../moby}/client/container_copy.go | 71 +- .../moby/moby/client/container_create.go | 125 + .../moby/moby/client/container_create_opts.go | 25 + .../moby}/client/container_diff.go | 14 +- .../moby/moby/client/container_diff_opts.go | 13 + .../moby/moby/client/container_exec.go | 205 + .../moby/moby/client/container_export.go | 47 + .../moby/moby/client/container_inspect.go | 47 + .../moby/moby/client/container_kill.go | 39 + .../moby}/client/container_list.go | 36 +- .../moby/moby/client/container_logs.go | 118 + .../moby/moby/client/container_pause.go | 28 + .../moby/moby/client/container_prune.go | 39 + .../moby}/client/container_remove.go | 25 +- .../moby/moby/client/container_rename.go | 39 + .../moby/moby/client/container_resize.go | 66 + .../moby/moby/client/container_restart.go | 54 + .../moby/moby/client/container_start.go | 40 + .../moby/moby/client/container_stats.go | 75 + .../moby/moby/client/container_stop.go | 58 + .../moby/moby/client/container_top.go | 44 + .../moby/moby/client/container_unpause.go | 28 + .../moby/moby/client/container_update.go | 46 + .../moby/moby/client/container_wait.go | 92 + .../moby/moby/client/distribution_inspect.go | 45 + .../docker => moby/moby}/client/envvars.go | 41 +- .../docker => moby/moby}/client/errors.go | 27 +- vendor/github.com/moby/moby/client/filters.go | 59 + .../docker => moby/moby}/client/hijack.go | 79 +- .../moby}/client/image_build.go | 39 +- .../moby/client/image_build_opts.go} | 36 +- .../moby}/client/image_history.go | 19 +- .../moby}/client/image_history_opts.go | 14 +- .../moby/moby/client/image_import.go | 66 + .../moby/moby/client/image_import_opts.go | 21 + .../moby/moby/client/image_inspect.go | 62 + .../moby}/client/image_inspect_opts.go | 33 +- .../github.com/moby/moby/client/image_list.go | 53 + .../moby/moby/client/image_list_opts.go | 24 + .../github.com/moby/moby/client/image_load.go | 64 + .../moby}/client/image_load_opts.go | 16 +- .../moby/moby/client/image_prune.go | 39 + .../github.com/moby/moby/client/image_pull.go | 93 + 
.../moby/moby/client/image_pull_opts.go | 25 + .../github.com/moby/moby/client/image_push.go | 98 + .../moby/moby/client/image_push_opts.go | 26 + .../moby}/client/image_remove.go | 12 +- .../moby/moby/client/image_remove_opts.go | 18 + .../docker => moby/moby}/client/image_save.go | 32 +- .../moby}/client/image_save_opts.go | 14 +- .../moby}/client/image_search.go | 21 +- .../moby/moby/client/image_search_opts.go | 27 + .../github.com/moby/moby/client/image_tag.go | 48 + .../moby/moby/client/internal/json-stream.go | 50 + .../moby/moby/client/internal/jsonmessages.go | 79 + .../client/internal/timestamp}/timestamp.go | 2 +- vendor/github.com/moby/moby/client/login.go | 45 + .../moby/moby/client/network_connect.go | 40 + .../moby/moby/client/network_create.go | 69 + .../moby/moby/client/network_disconnect.go | 40 + .../moby/moby/client/network_inspect.go | 39 + .../moby/moby/client/network_inspect_opts.go | 7 + .../moby/moby/client/network_list.go | 28 + .../moby/moby/client/network_list_opts.go | 6 + .../moby/moby/client/network_prune.go | 39 + .../moby/moby/client/network_remove.go | 26 + .../moby/moby/client/node_inspect.go | 41 + .../github.com/moby/moby/client/node_list.go | 33 + .../moby}/client/node_remove.go | 16 +- .../moby/moby/client/node_update.go | 30 + .../docker => moby/moby}/client/options.go | 102 +- vendor/github.com/moby/moby/client/ping.go | 151 + .../moby/client/pkg}/versions/compare.go | 2 +- .../moby/moby/client/plugin_create.go | 31 + .../moby/moby/client/plugin_disable.go | 31 + .../moby/moby/client/plugin_enable.go | 31 + .../moby/moby/client/plugin_inspect.go | 35 + .../moby/moby/client/plugin_install.go | 175 + .../moby/moby/client/plugin_list.go | 35 + .../moby/moby/client/plugin_push.go | 34 + .../moby/moby/client/plugin_remove.go | 33 + .../github.com/moby/moby/client/plugin_set.go | 27 + .../moby/moby/client/plugin_upgrade.go | 89 + .../docker => moby/moby}/client/request.go | 219 +- .../moby/moby/client/secret_create.go | 34 + .../moby/moby/client/secret_inspect.go | 35 + .../moby/moby/client/secret_list.go | 38 + .../moby/moby/client/secret_remove.go | 25 + .../moby/moby/client/secret_update.go | 32 + .../moby}/client/service_create.go | 118 +- .../moby/moby/client/service_inspect.go | 40 + .../moby/moby/client/service_list.go | 44 + .../moby/moby/client/service_logs.go | 91 + .../moby/moby/client/service_remove.go | 25 + .../moby/moby/client/service_update.go | 112 + .../moby/moby/client/swarm_get_unlock_key.go | 26 + .../github.com/moby/moby/client/swarm_init.go | 54 + .../moby/moby/client/swarm_inspect.go | 31 + .../github.com/moby/moby/client/swarm_join.go | 38 + .../moby/moby/client/swarm_leave.go | 25 + .../moby/moby/client/swarm_unlock.go | 25 + .../moby/moby/client/swarm_update.go | 33 + .../moby/moby/client/system_disk_usage.go | 332 + .../moby/moby/client/system_events.go | 114 + .../moby/moby/client/system_info.go | 34 + .../moby/moby/client/task_inspect.go | 36 + .../github.com/moby/moby/client/task_list.go | 36 + .../github.com/moby/moby/client/task_logs.go | 84 + .../docker => moby/moby}/client/utils.go | 73 +- vendor/github.com/moby/moby/client/version.go | 81 + .../moby/moby/client/volume_create.go | 42 + .../moby/moby/client/volume_inspect.go | 36 + .../moby/moby/client/volume_list.go | 46 + .../moby/moby/client/volume_prune.go | 55 + .../moby/moby/client/volume_remove.go | 36 + .../moby/moby/client/volume_update.go | 40 + vendor/modules.txt | 73 +- 435 files changed, 8851 insertions(+), 41317 deletions(-) delete mode 100644 
vendor/github.com/docker/docker/AUTHORS delete mode 100644 vendor/github.com/docker/docker/NOTICE delete mode 100644 vendor/github.com/docker/docker/api/README.md delete mode 100644 vendor/github.com/docker/docker/api/common.go delete mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml delete mode 100644 vendor/github.com/docker/docker/api/swagger.yaml delete mode 100644 vendor/github.com/docker/docker/api/types/build/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/api/types/checkpoint/options.go delete mode 100644 vendor/github.com/docker/docker/api/types/client.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/exec.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/network_settings.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/options.go delete mode 100644 vendor/github.com/docker/docker/api/types/filters/errors.go delete mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/image.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/opts.go delete mode 100644 vendor/github.com/docker/docker/api/types/network/endpoint.go delete mode 100644 vendor/github.com/docker/docker/api/types/network/ipam.go delete mode 100644 vendor/github.com/docker/docker/api/types/network/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authconfig.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go delete mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto delete mode 100644 vendor/github.com/docker/docker/api/types/system/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/api/types/system/security_opts.go delete mode 100644 vendor/github.com/docker/docker/api/types/types.go delete mode 100644 vendor/github.com/docker/docker/api/types/types_deprecated.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/options.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_update.go delete mode 100644 vendor/github.com/docker/docker/client/README.md delete mode 100644 vendor/github.com/docker/docker/client/build_cancel.go delete mode 100644 vendor/github.com/docker/docker/client/build_prune.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go delete mode 100644 vendor/github.com/docker/docker/client/client_deprecated.go delete mode 100644 
vendor/github.com/docker/docker/client/client_interfaces.go delete mode 100644 vendor/github.com/docker/docker/client/client_unix.go delete mode 100644 vendor/github.com/docker/docker/client/client_windows.go delete mode 100644 vendor/github.com/docker/docker/client/config_create.go delete mode 100644 vendor/github.com/docker/docker/client/config_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/config_list.go delete mode 100644 vendor/github.com/docker/docker/client/config_remove.go delete mode 100644 vendor/github.com/docker/docker/client/config_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_attach.go delete mode 100644 vendor/github.com/docker/docker/client/container_create.go delete mode 100644 vendor/github.com/docker/docker/client/container_exec.go delete mode 100644 vendor/github.com/docker/docker/client/container_export.go delete mode 100644 vendor/github.com/docker/docker/client/container_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/container_kill.go delete mode 100644 vendor/github.com/docker/docker/client/container_logs.go delete mode 100644 vendor/github.com/docker/docker/client/container_pause.go delete mode 100644 vendor/github.com/docker/docker/client/container_prune.go delete mode 100644 vendor/github.com/docker/docker/client/container_rename.go delete mode 100644 vendor/github.com/docker/docker/client/container_resize.go delete mode 100644 vendor/github.com/docker/docker/client/container_restart.go delete mode 100644 vendor/github.com/docker/docker/client/container_start.go delete mode 100644 vendor/github.com/docker/docker/client/container_stats.go delete mode 100644 vendor/github.com/docker/docker/client/container_stop.go delete mode 100644 vendor/github.com/docker/docker/client/container_top.go delete mode 100644 vendor/github.com/docker/docker/client/container_unpause.go delete mode 100644 vendor/github.com/docker/docker/client/container_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_wait.go delete mode 100644 vendor/github.com/docker/docker/client/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/events.go delete mode 100644 vendor/github.com/docker/docker/client/image_create.go delete mode 100644 vendor/github.com/docker/docker/client/image_import.go delete mode 100644 vendor/github.com/docker/docker/client/image_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/image_list.go delete mode 100644 vendor/github.com/docker/docker/client/image_load.go delete mode 100644 vendor/github.com/docker/docker/client/image_prune.go delete mode 100644 vendor/github.com/docker/docker/client/image_pull.go delete mode 100644 vendor/github.com/docker/docker/client/image_push.go delete mode 100644 vendor/github.com/docker/docker/client/image_tag.go delete mode 100644 vendor/github.com/docker/docker/client/info.go delete mode 100644 vendor/github.com/docker/docker/client/login.go delete mode 100644 vendor/github.com/docker/docker/client/network_connect.go delete mode 100644 vendor/github.com/docker/docker/client/network_create.go delete mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go delete mode 100644 vendor/github.com/docker/docker/client/network_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/network_list.go delete mode 100644 vendor/github.com/docker/docker/client/network_prune.go delete mode 100644 
vendor/github.com/docker/docker/client/network_remove.go delete mode 100644 vendor/github.com/docker/docker/client/node_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/node_list.go delete mode 100644 vendor/github.com/docker/docker/client/node_update.go delete mode 100644 vendor/github.com/docker/docker/client/ping.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_create.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_install.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_list.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_push.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_set.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go delete mode 100644 vendor/github.com/docker/docker/client/secret_create.go delete mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/secret_list.go delete mode 100644 vendor/github.com/docker/docker/client/secret_remove.go delete mode 100644 vendor/github.com/docker/docker/client/secret_update.go delete mode 100644 vendor/github.com/docker/docker/client/service_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/service_list.go delete mode 100644 vendor/github.com/docker/docker/client/service_logs.go delete mode 100644 vendor/github.com/docker/docker/client/service_remove.go delete mode 100644 vendor/github.com/docker/docker/client/service_update.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_init.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_join.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_update.go delete mode 100644 vendor/github.com/docker/docker/client/task_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/task_list.go delete mode 100644 vendor/github.com/docker/docker/client/task_logs.go delete mode 100644 vendor/github.com/docker/docker/client/version.go delete mode 100644 vendor/github.com/docker/docker/client/volume_create.go delete mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/volume_list.go delete mode 100644 vendor/github.com/docker/docker/client/volume_prune.go delete mode 100644 vendor/github.com/docker/docker/client/volume_remove.go delete mode 100644 vendor/github.com/docker/docker/client/volume_update.go delete mode 100644 vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go delete mode 100644 vendor/github.com/docker/docker/internal/multierror/multierror.go delete mode 100644 vendor/github.com/docker/go-connections/nat/nat.go delete mode 100644 vendor/github.com/docker/go-connections/nat/parse.go delete mode 100644 vendor/github.com/docker/go-connections/nat/sort.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/README.md 
create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket_unix.go create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket_windows.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go delete mode 100644 vendor/github.com/gogo/protobuf/AUTHORS delete mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS delete mode 100644 vendor/github.com/gogo/protobuf/LICENSE delete mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go rename vendor/github.com/{docker/docker => moby/moby/api}/LICENSE (100%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/blkiodev/blkio.go (85%) create mode 100644 vendor/github.com/moby/moby/api/types/build/build.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/build/cache.go (66%) create mode 100644 vendor/github.com/moby/moby/api/types/build/disk_usage.go create mode 100644 vendor/github.com/moby/moby/api/types/checkpoint/create_request.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/checkpoint/list.go (100%) rename 
vendor/github.com/{docker/docker/api/types => moby/moby/api/types/common}/error_response.go (73%) rename vendor/github.com/{docker/docker/api/types => moby/moby/api/types/common}/error_response_ext.go (86%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/common/id_response.go (87%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/change_type.go (87%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/change_types.go (100%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/commit.go (76%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/config.go (57%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/container.go (69%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/create_request.go (89%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/create_response.go (70%) create mode 100644 vendor/github.com/moby/moby/api/types/container/disk_usage.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/errors.go (100%) create mode 100644 vendor/github.com/moby/moby/api/types/container/exec.go create mode 100644 vendor/github.com/moby/moby/api/types/container/exec_create_request.go create mode 100644 vendor/github.com/moby/moby/api/types/container/exec_start_request.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/filesystem_change.go (90%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/health.go (81%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/hostconfig.go (93%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/hostconfig_unix.go (89%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/hostconfig_windows.go (90%) create mode 100644 vendor/github.com/moby/moby/api/types/container/network_settings.go rename vendor/github.com/{docker/docker/api/types/container/port.go => moby/moby/api/types/container/port_summary.go} (56%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/state.go (59%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/stats.go (67%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/top_response.go (63%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/update_response.go (76%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/wait_exit_error.go (86%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/wait_response.go (80%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/container/waitcondition.go (91%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/events/events.go (89%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/image/delete_response.go (89%) create mode 100644 vendor/github.com/moby/moby/api/types/image/disk_usage.go create mode 100644 vendor/github.com/moby/moby/api/types/image/image.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/image/image_history.go (84%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/image/image_inspect.go (70%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/image/manifest.go (100%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/image/summary.go (93%) create mode 100644 vendor/github.com/moby/moby/api/types/jsonstream/json_error.go create mode 100644 
vendor/github.com/moby/moby/api/types/jsonstream/message.go create mode 100644 vendor/github.com/moby/moby/api/types/jsonstream/progress.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/mount/mount.go (98%) create mode 100644 vendor/github.com/moby/moby/api/types/network/config_reference.go create mode 100644 vendor/github.com/moby/moby/api/types/network/connect_request.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/network/create_response.go (71%) create mode 100644 vendor/github.com/moby/moby/api/types/network/disconnect_request.go create mode 100644 vendor/github.com/moby/moby/api/types/network/endpoint.go create mode 100644 vendor/github.com/moby/moby/api/types/network/endpoint_resource.go create mode 100644 vendor/github.com/moby/moby/api/types/network/hwaddr.go create mode 100644 vendor/github.com/moby/moby/api/types/network/inspect.go create mode 100644 vendor/github.com/moby/moby/api/types/network/ipam.go create mode 100644 vendor/github.com/moby/moby/api/types/network/ipam_status.go create mode 100644 vendor/github.com/moby/moby/api/types/network/network.go create mode 100644 vendor/github.com/moby/moby/api/types/network/network_types.go create mode 100644 vendor/github.com/moby/moby/api/types/network/peer_info.go create mode 100644 vendor/github.com/moby/moby/api/types/network/port.go create mode 100644 vendor/github.com/moby/moby/api/types/network/service_info.go create mode 100644 vendor/github.com/moby/moby/api/types/network/status.go create mode 100644 vendor/github.com/moby/moby/api/types/network/subnet_status.go create mode 100644 vendor/github.com/moby/moby/api/types/network/summary.go create mode 100644 vendor/github.com/moby/moby/api/types/network/task.go create mode 100644 vendor/github.com/moby/moby/api/types/plugin/.gitignore create mode 100644 vendor/github.com/moby/moby/api/types/plugin/capability.go rename vendor/github.com/{docker/docker/api/types/plugin_device.go => moby/moby/api/types/plugin/device.go} (72%) rename vendor/github.com/{docker/docker/api/types/plugin_env.go => moby/moby/api/types/plugin/env.go} (77%) rename vendor/github.com/{docker/docker/api/types/plugin_mount.go => moby/moby/api/types/plugin/mount.go} (65%) rename vendor/github.com/{docker/docker/api/types => moby/moby/api/types/plugin}/plugin.go (52%) create mode 100644 vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go create mode 100644 vendor/github.com/moby/moby/api/types/registry/auth_response.go create mode 100644 vendor/github.com/moby/moby/api/types/registry/authconfig.go create mode 100644 vendor/github.com/moby/moby/api/types/registry/registry.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/registry/search.go (63%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/storage/driver_data.go (61%) create mode 100644 vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go create mode 100644 vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go create mode 100644 vendor/github.com/moby/moby/api/types/storage/storage.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/common.go (95%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/config.go (86%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/container.go (95%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/network.go (79%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/node.go (91%) rename 
vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/runtime.go (54%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/secret.go (84%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/service.go (71%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/service_create_response.go (73%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/service_update_response.go (66%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/swarm.go (92%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/swarm/task.go (93%) create mode 100644 vendor/github.com/moby/moby/api/types/system/disk_usage.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/system/info.go (89%) rename vendor/github.com/{docker/docker => moby/moby}/api/types/system/runtime.go (79%) create mode 100644 vendor/github.com/moby/moby/api/types/system/version_response.go create mode 100644 vendor/github.com/moby/moby/api/types/types.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/volume/cluster_volume.go (99%) rename vendor/github.com/{docker/docker/api/types/volume/create_options.go => moby/moby/api/types/volume/create_request.go} (65%) create mode 100644 vendor/github.com/moby/moby/api/types/volume/disk_usage.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/volume/list_response.go (74%) create mode 100644 vendor/github.com/moby/moby/api/types/volume/prune_report.go rename vendor/github.com/{docker/docker => moby/moby}/api/types/volume/volume.go (81%) create mode 100644 vendor/github.com/moby/moby/client/LICENSE create mode 100644 vendor/github.com/moby/moby/client/README.md create mode 100644 vendor/github.com/moby/moby/client/auth.go create mode 100644 vendor/github.com/moby/moby/client/build_cancel.go create mode 100644 vendor/github.com/moby/moby/client/build_prune.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_create.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_list.go create mode 100644 vendor/github.com/moby/moby/client/checkpoint_remove.go rename vendor/github.com/{docker/docker => moby/moby}/client/client.go (61%) create mode 100644 vendor/github.com/moby/moby/client/client_interfaces.go create mode 100644 vendor/github.com/moby/moby/client/client_unix.go create mode 100644 vendor/github.com/moby/moby/client/client_windows.go create mode 100644 vendor/github.com/moby/moby/client/config_create.go create mode 100644 vendor/github.com/moby/moby/client/config_inspect.go create mode 100644 vendor/github.com/moby/moby/client/config_list.go create mode 100644 vendor/github.com/moby/moby/client/config_remove.go create mode 100644 vendor/github.com/moby/moby/client/config_update.go create mode 100644 vendor/github.com/moby/moby/client/container_attach.go rename vendor/github.com/{docker/docker => moby/moby}/client/container_commit.go (55%) rename vendor/github.com/{docker/docker => moby/moby}/client/container_copy.go (58%) create mode 100644 vendor/github.com/moby/moby/client/container_create.go create mode 100644 vendor/github.com/moby/moby/client/container_create_opts.go rename vendor/github.com/{docker/docker => moby/moby}/client/container_diff.go (64%) create mode 100644 vendor/github.com/moby/moby/client/container_diff_opts.go create mode 100644 vendor/github.com/moby/moby/client/container_exec.go create mode 100644 vendor/github.com/moby/moby/client/container_export.go create mode 100644 
vendor/github.com/moby/moby/client/container_inspect.go create mode 100644 vendor/github.com/moby/moby/client/container_kill.go rename vendor/github.com/{docker/docker => moby/moby}/client/container_list.go (53%) create mode 100644 vendor/github.com/moby/moby/client/container_logs.go create mode 100644 vendor/github.com/moby/moby/client/container_pause.go create mode 100644 vendor/github.com/moby/moby/client/container_prune.go rename vendor/github.com/{docker/docker => moby/moby}/client/container_remove.go (50%) create mode 100644 vendor/github.com/moby/moby/client/container_rename.go create mode 100644 vendor/github.com/moby/moby/client/container_resize.go create mode 100644 vendor/github.com/moby/moby/client/container_restart.go create mode 100644 vendor/github.com/moby/moby/client/container_start.go create mode 100644 vendor/github.com/moby/moby/client/container_stats.go create mode 100644 vendor/github.com/moby/moby/client/container_stop.go create mode 100644 vendor/github.com/moby/moby/client/container_top.go create mode 100644 vendor/github.com/moby/moby/client/container_unpause.go create mode 100644 vendor/github.com/moby/moby/client/container_update.go create mode 100644 vendor/github.com/moby/moby/client/container_wait.go create mode 100644 vendor/github.com/moby/moby/client/distribution_inspect.go rename vendor/github.com/{docker/docker => moby/moby}/client/envvars.go (72%) rename vendor/github.com/{docker/docker => moby/moby}/client/errors.go (75%) create mode 100644 vendor/github.com/moby/moby/client/filters.go rename vendor/github.com/{docker/docker => moby/moby}/client/hijack.go (65%) rename vendor/github.com/{docker/docker => moby/moby}/client/image_build.go (79%) rename vendor/github.com/{docker/docker/api/types/build/build.go => moby/moby/client/image_build_opts.go} (75%) rename vendor/github.com/{docker/docker => moby/moby}/client/image_history.go (72%) rename vendor/github.com/{docker/docker => moby/moby}/client/image_history_opts.go (53%) create mode 100644 vendor/github.com/moby/moby/client/image_import.go create mode 100644 vendor/github.com/moby/moby/client/image_import_opts.go create mode 100644 vendor/github.com/moby/moby/client/image_inspect.go rename vendor/github.com/{docker/docker => moby/moby}/client/image_inspect_opts.go (70%) create mode 100644 vendor/github.com/moby/moby/client/image_list.go create mode 100644 vendor/github.com/moby/moby/client/image_list_opts.go create mode 100644 vendor/github.com/moby/moby/client/image_load.go rename vendor/github.com/{docker/docker => moby/moby}/client/image_load_opts.go (69%) create mode 100644 vendor/github.com/moby/moby/client/image_prune.go create mode 100644 vendor/github.com/moby/moby/client/image_pull.go create mode 100644 vendor/github.com/moby/moby/client/image_pull_opts.go create mode 100644 vendor/github.com/moby/moby/client/image_push.go create mode 100644 vendor/github.com/moby/moby/client/image_push_opts.go rename vendor/github.com/{docker/docker => moby/moby}/client/image_remove.go (73%) create mode 100644 vendor/github.com/moby/moby/client/image_remove_opts.go rename vendor/github.com/{docker/docker => moby/moby}/client/image_save.go (50%) rename vendor/github.com/{docker/docker => moby/moby}/client/image_save_opts.go (60%) rename vendor/github.com/{docker/docker => moby/moby}/client/image_search.go (70%) create mode 100644 vendor/github.com/moby/moby/client/image_search_opts.go create mode 100644 vendor/github.com/moby/moby/client/image_tag.go create mode 100644 
vendor/github.com/moby/moby/client/internal/json-stream.go create mode 100644 vendor/github.com/moby/moby/client/internal/jsonmessages.go rename vendor/github.com/{docker/docker/api/types/time => moby/moby/client/internal/timestamp}/timestamp.go (98%) create mode 100644 vendor/github.com/moby/moby/client/login.go create mode 100644 vendor/github.com/moby/moby/client/network_connect.go create mode 100644 vendor/github.com/moby/moby/client/network_create.go create mode 100644 vendor/github.com/moby/moby/client/network_disconnect.go create mode 100644 vendor/github.com/moby/moby/client/network_inspect.go create mode 100644 vendor/github.com/moby/moby/client/network_inspect_opts.go create mode 100644 vendor/github.com/moby/moby/client/network_list.go create mode 100644 vendor/github.com/moby/moby/client/network_list_opts.go create mode 100644 vendor/github.com/moby/moby/client/network_prune.go create mode 100644 vendor/github.com/moby/moby/client/network_remove.go create mode 100644 vendor/github.com/moby/moby/client/node_inspect.go create mode 100644 vendor/github.com/moby/moby/client/node_list.go rename vendor/github.com/{docker/docker => moby/moby}/client/node_remove.go (56%) create mode 100644 vendor/github.com/moby/moby/client/node_update.go rename vendor/github.com/{docker/docker => moby/moby}/client/options.go (66%) create mode 100644 vendor/github.com/moby/moby/client/ping.go rename vendor/github.com/{docker/docker/api/types => moby/moby/client/pkg}/versions/compare.go (94%) create mode 100644 vendor/github.com/moby/moby/client/plugin_create.go create mode 100644 vendor/github.com/moby/moby/client/plugin_disable.go create mode 100644 vendor/github.com/moby/moby/client/plugin_enable.go create mode 100644 vendor/github.com/moby/moby/client/plugin_inspect.go create mode 100644 vendor/github.com/moby/moby/client/plugin_install.go create mode 100644 vendor/github.com/moby/moby/client/plugin_list.go create mode 100644 vendor/github.com/moby/moby/client/plugin_push.go create mode 100644 vendor/github.com/moby/moby/client/plugin_remove.go create mode 100644 vendor/github.com/moby/moby/client/plugin_set.go create mode 100644 vendor/github.com/moby/moby/client/plugin_upgrade.go rename vendor/github.com/{docker/docker => moby/moby}/client/request.go (52%) create mode 100644 vendor/github.com/moby/moby/client/secret_create.go create mode 100644 vendor/github.com/moby/moby/client/secret_inspect.go create mode 100644 vendor/github.com/moby/moby/client/secret_list.go create mode 100644 vendor/github.com/moby/moby/client/secret_remove.go create mode 100644 vendor/github.com/moby/moby/client/secret_update.go rename vendor/github.com/{docker/docker => moby/moby}/client/service_create.go (63%) create mode 100644 vendor/github.com/moby/moby/client/service_inspect.go create mode 100644 vendor/github.com/moby/moby/client/service_list.go create mode 100644 vendor/github.com/moby/moby/client/service_logs.go create mode 100644 vendor/github.com/moby/moby/client/service_remove.go create mode 100644 vendor/github.com/moby/moby/client/service_update.go create mode 100644 vendor/github.com/moby/moby/client/swarm_get_unlock_key.go create mode 100644 vendor/github.com/moby/moby/client/swarm_init.go create mode 100644 vendor/github.com/moby/moby/client/swarm_inspect.go create mode 100644 vendor/github.com/moby/moby/client/swarm_join.go create mode 100644 vendor/github.com/moby/moby/client/swarm_leave.go create mode 100644 vendor/github.com/moby/moby/client/swarm_unlock.go create mode 100644 
vendor/github.com/moby/moby/client/swarm_update.go create mode 100644 vendor/github.com/moby/moby/client/system_disk_usage.go create mode 100644 vendor/github.com/moby/moby/client/system_events.go create mode 100644 vendor/github.com/moby/moby/client/system_info.go create mode 100644 vendor/github.com/moby/moby/client/task_inspect.go create mode 100644 vendor/github.com/moby/moby/client/task_list.go create mode 100644 vendor/github.com/moby/moby/client/task_logs.go rename vendor/github.com/{docker/docker => moby/moby}/client/utils.go (61%) create mode 100644 vendor/github.com/moby/moby/client/version.go create mode 100644 vendor/github.com/moby/moby/client/volume_create.go create mode 100644 vendor/github.com/moby/moby/client/volume_inspect.go create mode 100644 vendor/github.com/moby/moby/client/volume_list.go create mode 100644 vendor/github.com/moby/moby/client/volume_prune.go create mode 100644 vendor/github.com/moby/moby/client/volume_remove.go create mode 100644 vendor/github.com/moby/moby/client/volume_update.go
diff --git a/go.mod b/go.mod
index 86a2c58b4..ddf1bc7d9 100644
--- a/go.mod
+++ b/go.mod
@@ -6,11 +6,12 @@ require (
 	github.com/containerd/stargz-snapshotter/estargz v0.16.3
 	github.com/docker/cli v28.2.2+incompatible
 	github.com/docker/distribution v2.8.3+incompatible
-	github.com/docker/docker v28.2.2+incompatible
 	github.com/google/go-cmp v0.7.0
 	github.com/klauspost/compress v1.18.0
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/moby/docker-image-spec v1.3.1
+	github.com/moby/moby/api v1.52.0
+	github.com/moby/moby/client v0.1.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.1
 	github.com/spf13/cobra v1.9.1
@@ -24,20 +25,15 @@ require (
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/containerd/errdefs v1.0.0 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
-	github.com/containerd/log v0.1.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/docker-credential-helpers v0.9.3 // indirect
-	github.com/docker/go-connections v0.5.0 // indirect
+	github.com/docker/go-connections v0.6.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/moby/sys/atomicwriter v0.1.0 // indirect
-	github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
-	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
@@ -46,13 +42,9 @@ require (
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
 	go.opentelemetry.io/otel v1.36.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect
 	go.opentelemetry.io/otel/metric v1.36.0 // indirect
 	go.opentelemetry.io/otel/trace v1.36.0 // indirect
 	golang.org/x/mod v0.25.0 // indirect
 	golang.org/x/sys v0.33.0 // indirect
-	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
-	google.golang.org/protobuf v1.36.3 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	gotest.tools/v3 v3.0.3 // indirect
 )
diff --git a/go.sum b/go.sum
index 3f195048b..7b21db824 100644
--- a/go.sum
+++ b/go.sum
@@ -1,17 +1,11 @@
cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -26,12 +20,10 @@ github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsy github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= -github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -41,19 +33,12 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -64,19 +49,14 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= -github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= -github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= -github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg= +github.com/moby/moby/api v1.52.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= +github.com/moby/moby/client v0.1.0 h1:nt+hn6O9cyJQqq5UWnFGqsZRTS/JirUqzPjEl0Bdc/8= +github.com/moby/moby/client v0.1.0/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -89,7 +69,6 @@ github.com/sirupsen/logrus v1.9.3 
h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -98,18 +77,12 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= @@ -118,65 +91,24 @@ go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFw go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/pkg/v1/daemon/image.go b/pkg/v1/daemon/image.go index 1924702f3..f76a99187 100644 --- a/pkg/v1/daemon/image.go +++ b/pkg/v1/daemon/image.go @@ -21,7 +21,7 @@ import ( "sync" "time" - api "github.com/docker/docker/api/types/image" + api "github.com/moby/moby/api/types/image" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" diff --git a/pkg/v1/daemon/image_test.go b/pkg/v1/daemon/image_test.go index fe0f6a4e4..1760a4268 100644 --- a/pkg/v1/daemon/image_test.go +++ b/pkg/v1/daemon/image_test.go @@ -23,10 +23,10 @@ import ( "strings" "testing" - api "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/storage" - "github.com/docker/docker/client" specs "github.com/moby/docker-image-spec/specs-go/v1" + api "github.com/moby/moby/api/types/image" + "github.com/moby/moby/api/types/storage" + "github.com/moby/moby/client" "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/compare" diff --git a/pkg/v1/daemon/options.go b/pkg/v1/daemon/options.go index ce6cfab20..bf3cc6811 100644 --- a/pkg/v1/daemon/options.go +++ b/pkg/v1/daemon/options.go @@ -18,8 +18,8 @@ import ( "context" "io" - api "github.com/docker/docker/api/types/image" - "github.com/docker/docker/client" + api "github.com/moby/moby/api/types/image" + "github.com/moby/moby/client" 
) // ImageOption is an alias for Option. @@ -76,7 +76,7 @@ func WithUnbufferedOpener() Option { // WithClient is a functional option to allow injecting a docker client. // -// By default, github.com/docker/docker/client.FromEnv is used. +// By default, github.com/moby/moby/client.FromEnv is used. func WithClient(client Client) Option { return func(o *options) { o.client = client diff --git a/pkg/v1/daemon/write.go b/pkg/v1/daemon/write.go index 0121d5c8c..4d0210ca2 100644 --- a/pkg/v1/daemon/write.go +++ b/pkg/v1/daemon/write.go @@ -18,10 +18,10 @@ import ( "fmt" "io" - "github.com/docker/docker/client" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/moby/moby/client" ) // Tag adds a tag to an already existent image. diff --git a/pkg/v1/daemon/write_test.go b/pkg/v1/daemon/write_test.go index 6746cf9e1..9f118c2a0 100644 --- a/pkg/v1/daemon/write_test.go +++ b/pkg/v1/daemon/write_test.go @@ -22,8 +22,8 @@ import ( "strings" "testing" - api "github.com/docker/docker/api/types/image" - "github.com/docker/docker/client" + api "github.com/moby/moby/api/types/image" + "github.com/moby/moby/client" "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/empty" diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index c7c649471..000000000 --- a/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2496 +0,0 @@ -# File @generated by hack/generate-authors.sh. DO NOT EDIT. -# This file lists all contributors to the repository. -# See hack/generate-authors.sh to make modifications. - -17neverends -7sunarni <710720732@qq.com> -Aanand Prasad -Aarni Koskela -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. 
Xu -Aaron Lehmann -Aaron Welch -Aaron Yoshitake -Abdur Rehman -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Abirdcfly -Ada Mancini -Adam Avilla -Adam Dobrawy -Adam Eijdenberg -Adam Kunk -Adam Lamers -Adam Miller -Adam Mills -Adam Pointer -Adam Simon -Adam Singer -Adam Thornton -Adam Walz -Adam Williams -AdamKorcz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akhil Mohan -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Akshay Moghe -Al Tobey -alambike -Alan Hoyle -Alan Scherger -Alan Thompson -Alano Terblanche -Albert Callarisa -Albert Zhang -Albin Kerouanton -Alec Benson -Alejandro González Hevia -Aleksa Sarai -Aleksandr Chebotov -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Nordlund -Alex Olshansky -Alex Samorukov -Alex Stockinger -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Polakov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis Ries -Alexis Thomas -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Ameya Gawde -Amir Goldstein -AmirBuddy -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anca Iordache -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Denisse Gómez -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Ushakov -Andrei Vagin -Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kim -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -Andrés Maldonado -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Lindeman -Andy Rothfusz -Andy Smith -Andy Wilson -Andy Zhang -Aneesh Kulkarni -Anes Hasicic -Angel Velazquez -Anil Belur -Anil Madhavapeddy -Anirudh Aithal -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Aguilar -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anuj Varma -Anusha Ragunathan -Anyu Wang -apocas -Arash Deshmeh -arcosx -ArikaChen -Arko Dasgupta -Arnaud Lefebvre -Arnaud Porterie -Arnaud Rebillout -Artem Khramov -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -Ashly Mathew -Austin Vazquez -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -Azat Khuyiyakhmetov -Bao Yonglei -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -Bastien Pascard -bdevloed -Bearice Ren -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Langfeld -Ben Lovy -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Böhmke -Benjamin Wang -Benjamin Yolken -Benny Ng -Benoit Chesneau -Bernerd Schaefer -Bernhard M. Wiedemann -Bert Goethals -Bertrand Roussel -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Billy Ridgway -Bily Zhang -Bin Liu -Bingshen Wang -Bjorn Neergaard -Blake Geno -Boaz Shuster -bobby abbott -Bojun Zhu -Boqin Qin -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brendon Smith -Brennan Kinney <5098581+polarathene@users.noreply.github.com> -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Milford -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Calvin Liu -Cameron Boehmer -Cameron Sparr -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. 
Su -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cesar Talledo -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charity Kathure -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chee Hau Lim -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -Chengyu Zhu -Chentianze -Chenyang Yan -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -Chiranjeevi Tirunagari -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris Kreussling (Flatbush Gardener) -Chris McKinnel -Chris McKinnel -Chris Price -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Becker -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christoph Ziebuhr -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Petito -Christopher Rigor -Christy Norman -Chun Chen -Ciro S. Costa -Clayton Coleman -Clint Armstrong -Clinton Kitson -clubby789 -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Conor Evans -Corbin Coleman -Corey Farrell -Cory Forsyth -Cory Snider -cressie176 -Cristian Ariza -Cristian Staretu -cristiano balducci -Cristina Yenyxe Gonzalez Garcia -Cruceru Calin-Cristian -cui fliter -CUI Wei -Cuong Manh Le -Cyprian Gracz -Cyril F -Da McGrady -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Plamadeala -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Black -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Guns -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel P. Berrangé -Daniel Robinson -Daniel S -Daniel Sweet -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Daniele Rondina -Danny Berger -Danny Milosavljevic -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Bellotti -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Karlsson <35727626+dvdksn@users.noreply.github.com> -David Lawrence -David Lechner -David M. Karr -David Mackey -David Manouchehri -David Mat -David Mcanulty -David McKay -David O'Rourke -David P Hilton -David Pelaez -David R. 
Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -Devon Estes -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Dhilip Kumars -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -dingwei -Diogo Monica -DiuDiugirl -Djibril Koné -Djordje Lukic -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dmytro Iakovliev -docker-unir[bot] -Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Dorin Geman -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Koromilas -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Eng Zer Jun -Enguerran -Enrico Weigelt, metux IT consult -Eohyung Lee -epeterso -er0k -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Mountain -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erich Cordoba -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Sipsma -Erik Sjölund -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Espen Suenson -Ethan Bell -Ethan Mosbaugh -Euan Harris -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Lezar -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeniy Makhrov -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Feroz Salam -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Filipe Pina -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Schmaus -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Degrassi -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Frank Villaro-Dixon -Frank Yang -François Scala -Fred Lifton -Frederick F. Kautz IV -Frederico F. 
de Oliveira -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -frobnicaty <92033765+frobnicaty@users.noreply.github.com> -Frédéric Dalleau -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Adrian Samfira -Gabriel Goller -Gabriel L. Somlo -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gabriel Tomitsuka -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -Gaurav Singh -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoff Levand -Geoffrey Bachelet -Geon Kim -George Adams -George Kontridze -George Ma -George MacRorie -George Xie -Georgi Hristozov -Georgy Yakovlev -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Giuseppe Scrivano -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grace Choi -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -Gunadhya S. <6939749+gunadhya@users.noreply.github.com> -Guoqiang QI -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -Haichao Yang -haikuoliu -haining.cao -Hakan Özler -Hamish Hutchings -Hannes Ljungberg -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harald Niesche -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -Hongxu Jia -Honza Pokorny -Hsing-Hui Hsu -Hsing-Yu (David) Chen -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huajin Tong -huang-jl <1046678590@qq.com> -HuanHuan Ye -Huanzhong Zhang -Huayi Zhang -Hugo Barrera -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hui Kang -Hunter Blanks -huqun -Huu Nguyen -Hyeongkyu Lee -Hyzhou Zhy -Iago López Galeiras -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Illia Antypenko -Illo Abdulrahim -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imalasong <2879499479@qq.com> -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isaiah Grace -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. 
Nunn -Jack Danger Canty -Jack Laxson -Jack Walker <90711509+j2walker@users.noreply.github.com> -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaime Cepeda -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -Jakub Drahos -Jakub Guzik -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Sanders -James Turnbull -James Watkins-Harvey -Jameson Hyde -Jamie Hannaford -Jamshid Afshar -Jan Breig -Jan Chren -Jan Garcia -Jan Götte -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslav Jindrak -Jaroslaw Zabiello -Jasmine Hegman -Jason A. Donenfeld -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -Jasper Siepkes -Javier Bassi -jaxgeller -Jay -Jay Kamat -Jay Lim -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Michel Rouet -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeff Zvier -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeongseok Kang -Jeremy Chambers -Jeremy Grosser -Jeremy Huntwork -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jeyanthinath Muthuram -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zeng -Jian Zhang -Jiang Jinyang -Jianyong Wu -Jie Luo -Jie Ma -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Carroll -Jim Ehrismann -Jim Galasyn -Jim Lin -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -jinjiadu -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -jjimbo137 <115816493+jjimbo137@users.noreply.github.com> -Joakim Roubert -Joan Grau -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Dohse -Jonas Geiler -Jonas Heinrich -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. 
Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jordi Massaguer Pla -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -JSchltggr -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julien Pivotto -Julio Guerra -Julio Montes -Jun Du -Jun-Ru Chang -junxu -Jussi Nummelin -Justas Brazauskas -Justen Martin -Justin Chadwell -Justin Cormack -Justin Force -Justin Keller <85903732+jk-vb@users.noreply.github.com> -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni -Jörg Thalheim -K. Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kaijie Chen -Kaita Nakamura -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Kazuyoshi Kato -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Bannister -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kenta Tada -Kevin "qwazerty" Houdebert -Kevin Alvarez -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Parsons -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -Kirk Easterson -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konrad Ponichtera -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Kostadin Plachkov -kpcyrd -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristian Heljas -Kristina Zabunova -Krystian Wojcicki -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Squizzato -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Andringa -Lars Butler -Lars Kellogg-Stedman -Lars R. 
Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Brehm -Laura Frank -Laurent Bernaille -Laurent Erignoux -Laurent Goderre -Laurie Voss -Leandro Motta Barros -Leandro Siqueira -Lee Calcote -Lee Chao <932819864@qq.com> -Lee, Meng-Han -Lei Gong -Lei Jitang -Leiiwang -Len Weincier -Lennie -Leo Gallucci -Leonardo Nodari -Leonardo Taccari -Leszek Kowalski -Levi Blackstone -Levi Gross -Levi Harrison -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liangwei -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limeidan -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos -Louis Opter -Luboslav Pivarc -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Henrique Mulinari -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -maggie44 <64841595+maggie44@users.noreply.github.com> -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marat Radchenko -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Feit -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark Vainomaa -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Braun -Martin Dojcak -Martin Honermeyer -Martin Jirku -Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Maru Newby -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Mathieu Paturel -Matt Apperson -Matt Bachmann -Matt Bajor -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Morrison <3maven@gmail.com> -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Fronton -Matthieu Hauglustaine -Matthieu MOREL -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Harmathy -Max Shytikov -Max Timchenko -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Menghui Chen -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. 
Smith -Michael Beskin -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Kebe -Michael Kuehn -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael Weidmann -Michael West -Michael Zhao -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Kostrzewa -Michal Minář -Michal Rostecki -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michał Kosek -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Miguel Perez -Mihai Borobocea -Mihuleacc Sergiu -Mikael Davranche -Mike Brown -Mike Bush -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -Mike Sul -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milas Bowman -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohd Sadiq -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Muhammad Zohaib Aslam -Mustafa Akın -Muthukumar R -Myeongjoon Kim -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Baulch -Nathan Carlson -Nathan Herald -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Adcock -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Santos -Nick Stenning -Nick Stinemates -Nick Wood -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Niel Drummond -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Edigaryev -Nikolay Milovanov -ningmingxiao -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Nolan Miles -Noriki Nakamura -nponeccop -Nurahmadie -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Octol1ttle -Odin Ugedal -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv -Onur Filiz -Oriol Francès -Oscar Bonilla <6f6231@gmail.com> -oscar.chen <2972789494@qq.com> -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. Giarrusso -Pascal -Pascal Bach -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Haas -Patrick Hemmer -Patrick St. 
laurent -Patrick Stapleton -Patrik Cyvoct -Patrik Leifert -pattichen -Paul "TBBle" Hampson -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Seiffert -Paul Weaver -Paulo Gomes -Paulo Ribeiro -Pavel Lobashov -Pavel Matěja -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Paweł Gronowski -payall4u -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Pete Woods -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Petros Angelatos -Phil -Phil Estes -Phil Sphicas -Phil Spitler -Philip Alexander Etling -Philip K. Warren -Philip Monroe -Philipp Fruck -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -pinglanlu -Piotr Bogdan -Piotr Karbowski -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Pradipta Kr. Banerjee -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Puneet Pruthi -Pure White -pysqz -Qiang Huang -Qin TianHuan -Qinglan Peng -Quan Tian -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rachit Sharma -Radostin Stoyanov -Rafael Fernández López -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -realityone -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Horwood -Rich Moyse -Rich Seymour -Richard Burnison -Richard Hansen -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Cowsill <42620235+rcowsill@users.noreply.github.com> -Rob Gulewich -Rob Murray -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Shade -Robert Stern -Robert Sturla -Robert Terhaar -Robert Wallis -Robert Wang -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -Robin Thoni -robpc -Rodolfo Carvalho -Rodrigo Campos -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Mazur -Roman Strashkin -Roman Volosatovs -Roman Zabaluev -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -Rony Weng -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Roy Reznik -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui JingAn -Rui Lopes -Ruilin Li -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Barry -Ryan Belgrave -Ryan Campbell -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Shea -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Ryoga Saito -Régis Behmo -Rémy Greinhofer -s. 
rannou -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sam Thibault -Sam Whited -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -sanchayanghosh -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Moser -Scott Percival -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Sebastian Höffner -Sebastian Radloff -Sebastian Thomschke -Sebastien Goasguen -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -Serhii Nakon -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -Shaun Thompson -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayan Pooya -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shengjing Zhu -Shev Yan -Shih-Yuan Lee -Shihao Xia -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -Shreenidhi Shedi -Shu-Wai Chow -shuai-z -Shukui Yang -Sian Lerk Lau -Siarhei Rasiukevich -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simon Barendse -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -skanehira -Smark Meng -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Sotiris Salloumis -Soulou -Spencer Brown -Spencer Smith -Spike Curtis -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko -Stanislav Levin -Steeve Morin -Stefan Berger -Stefan Gehrig -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Steffen Butzer -Stephan Henningsen -Stephan Spindler -Stephen Benjamin -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Stéphane Este-Gracias -Stig Larsson -Su Wang -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Sören Tempel -Tabakhase -Tadej Janež -Tadeusz Dudkiewicz -Takuto Sato -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -tcpdumppy <847462026@qq.com> -Ted M. 
Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -Terry Chu -terryding77 <550147740@qq.com> -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thiago Alves Silva -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Graf -Thomas Grainger -Thomas Hansen -Thomas Ledos -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tiago Seabra -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Till Claassen -Till Wegmüller -Tim -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wagner -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timchenxiaoyu <837829664@qq.com> -timfeirg -Timo Rothenpieler -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Pfandzelter -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Parker -Tom Sweeney -Tom Wilkie -Tom X. Tobin -Tom Zhao -Tomas Janousek -Tomas Kral -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tomek Mańko -Tommaso Visconti -Tomoya Tabuchi -Tomáš Hrčka -Tomáš Virtus -tonic -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Toshiaki Makita -Tõnis Tiigi -Trace Andreason -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tudor Brindus -Ty Alexander -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -Valentin Kulesh -vanderliang -Velko Ivanov -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. 
Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Toni -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Vikas Choudhary -Vikram bir Singh -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Boulineau -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Anjos -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vladislav Kolesnikov -Vlastimil Zeman -Vojtech Vitek (V-Teq) -voloder <110066198+voloder@users.noreply.github.com> -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -wanghuaiqing -Ward Vandewege -WarheadsSE -Wassim Dhif -Wataru Ishida -Wayne Chang -Wayne Song -weebney -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -wenlxie -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wesley Pettit -Wewang Xiaorenfine -Wiktor Kwapisiewicz -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -Wilson Júnior -Wing-Kam Wong -WiseTrem -Wolfgang Nagele -Wolfgang Powisch -Wonjun Kim -WuLonghui -xamyzhao -Xia Wu -Xian Chaobo -Xianglin Gao -Xianjie -Xianlu Bird -Xiao YongBiao -Xiao Zhang -XiaoBing Jiang -Xiaodong Liu -Xiaodong Zhang -Xiaohua Ding -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -xin.li -Xinbo Weng -Xinfeng Liu -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -yalpul -YAMADA Tsuyoshi -Yamasaki Masahide -Yamazaki Masashi -Yan Feng -Yan Zhu -Yang Bai -Yang Li -Yang Pengfei -yangchenliang -Yann Autissier -Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -youcai -Youcef YEKHLEF -Youfu Zhang -YR Chen -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yufei Xiong -Yuhao Fang -Yuichiro Kaneko -YujiOshima -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Blusseau <90z7oey02@sneakemail.com> -Yves Junqueira -Zac Dover -Zach Borboa -Zach Gershman -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -zhangguanzhang -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -ZhiPeng Lu -zhipengzuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Ziheng Liu -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álvaro Lázaro -Átila Camurça Alves -吴小白 <296015668@qq.com> -尹吉峰 -屈骏 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 -정재영 diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 58b19b6d1..000000000 --- a/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). 
- -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md deleted file mode 100644 index 381f19881..000000000 --- a/vendor/github.com/docker/docker/api/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Working on the Engine API - -The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. - -It consists of various components in this repository: - -- `api/swagger.yaml` A Swagger definition of the API. -- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. -- `cli/` The command-line client. -- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. -- `daemon/` The daemon, which serves the API. - -## Swagger definition - -The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: - -1. Automatically generate documentation. -2. Automatically generate the Go server and client. (A work-in-progress.) -3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. - -## Updating the API documentation - -The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. - -The file is split into two main sections: - -- `definitions`, which defines re-usable objects used in requests and responses -- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) - -To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. - -There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). - -`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. - -## Viewing the API documentation - -When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. - -Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. 
- -The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go deleted file mode 100644 index c375c68f2..000000000 --- a/vendor/github.com/docker/docker/api/common.go +++ /dev/null @@ -1,20 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// Common constants for daemon and client. -const ( - // DefaultVersion of the current REST API. - DefaultVersion = "1.50" - - // MinSupportedAPIVersion is the minimum API version that can be supported - // by the API server, specified as "major.minor". Note that the daemon - // may be configured with a different minimum API version, as returned - // in [github.com/docker/docker/api/types.Version.MinAPIVersion]. - // - // API requests for API versions lower than the configured version produce - // an error. - MinSupportedAPIVersion = "1.24" - - // NoBaseImageSpecifier is the symbol used by the FROM - // command to specify that no base image is to be used. - NoBaseImageSpecifier = "scratch" -) diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml deleted file mode 100644 index f07a02737..000000000 --- a/vendor/github.com/docker/docker/api/swagger-gen.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -layout: - models: - - name: definition - source: asset:model - target: "{{ joinFilePath .Target .ModelPackage }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" - operations: - - name: handler - source: asset:serverOperation - target: "{{ joinFilePath .Target .APIPackage .Package }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml deleted file mode 100644 index 619b4470e..000000000 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ /dev/null @@ -1,13431 +0,0 @@ -# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. -# -# This is used for generating API documentation and the types used by the -# client/server. See api/README.md for more information. -# -# Some style notes: -# - This file is used by ReDoc, which allows GitHub Flavored Markdown in -# descriptions. -# - There is no maximum line length, for ease of editing and pretty diffs. -# - operationIds are in the format "NounVerb", with a singular noun. - -swagger: "2.0" -schemes: - - "http" - - "https" -produces: - - "application/json" - - "text/plain" -consumes: - - "application/json" - - "text/plain" -basePath: "/v1.50" -info: - title: "Docker Engine API" - version: "1.50" - x-logo: - url: "https://docs.docker.com/assets/images/logo-docker-main.png" - description: | - The Engine API is an HTTP API served by Docker Engine. It is the API the - Docker client uses to communicate with the Engine, so everything the Docker - client can do can be done with the API. - - Most of the client's commands map directly to API endpoints (e.g. `docker ps` - is `GET /containers/json`). The notable exception is running containers, - which consists of several API calls. - - # Errors - - The API uses standard HTTP status codes to indicate the success or failure - of the API call. The body of the response will be JSON in the following - format: - - ``` - { - "message": "page not found" - } - ``` - - # Versioning - - The API is usually changed in each release, so API calls are versioned to - ensure that clients don't break. 
To lock to a specific version of the API, - you prefix the URL with its version, for example, call `/v1.30/info` to use - the v1.30 version of the `/info` endpoint. If the API version specified in - the URL is not supported by the daemon, a HTTP `400 Bad Request` error message - is returned. - - If you omit the version-prefix, the current version of the API (v1.50) is used. - For example, calling `/info` is the same as calling `/v1.50/info`. Using the - API without a version-prefix is deprecated and will be removed in a future release. - - Engine releases in the near future should support this version of the API, - so your client will continue to work even if it is talking to a newer Engine. - - The API uses an open schema model, which means the server may add extra properties - to responses. Likewise, the server will ignore any extra query parameters and - request body properties. When you write clients, you need to ignore additional - properties in responses to ensure they do not break when talking to newer - daemons. - - - # Authentication - - Authentication for registries is handled client side. The client has to send - authentication details to various endpoints that need to communicate with - registries, such as `POST /images/(name)/push`. These are sent as - `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) - (JSON) string with the following structure: - - ``` - { - "username": "string", - "password": "string", - "email": "string", - "serveraddress": "string" - } - ``` - - The `serveraddress` is a domain/IP without a protocol. Throughout this - structure, double quotes are required. - - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), - you can just pass this instead of credentials: - - ``` - { - "identitytoken": "9cbaf023786cd7..." - } - ``` - -# The tags on paths define the menu sections in the ReDoc documentation, so -# the usage of tags must make sense for that: -# - They should be singular, not plural. -# - There should not be too many tags, or the menu becomes unwieldy. For -# example, it is preferable to add a path to the "System" tag instead of -# creating a tag with a single path in it. -# - The order of tags in this list defines the order in the menu. -tags: - # Primary objects - - name: "Container" - x-displayName: "Containers" - description: | - Create and manage containers. - - name: "Image" - x-displayName: "Images" - - name: "Network" - x-displayName: "Networks" - description: | - Networks are user-defined networks that containers can be attached to. - See the [networking documentation](https://docs.docker.com/network/) - for more information. - - name: "Volume" - x-displayName: "Volumes" - description: | - Create and manage persistent storage that can be attached to containers. - - name: "Exec" - x-displayName: "Exec" - description: | - Run new commands inside running containers. Refer to the - [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) - for more information. - - To exec a command in a container, you first need to create an exec instance, - then start it. These two API endpoints are wrapped up in a single command-line - command, `docker exec`. - - # Swarm things - - name: "Swarm" - x-displayName: "Swarm" - description: | - Engines can be clustered together in a swarm. Refer to the - [swarm mode documentation](https://docs.docker.com/engine/swarm/) - for more information. 
- - name: "Node" - x-displayName: "Nodes" - description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode - must be enabled for these endpoints to work. - - name: "Service" - x-displayName: "Services" - description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must - be enabled for these endpoints to work. - - name: "Task" - x-displayName: "Tasks" - description: | - A task is a container running on a swarm. It is the atomic scheduling unit - of swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Secret" - x-displayName: "Secrets" - description: | - Secrets are sensitive data that can be used by services. Swarm mode must - be enabled for these endpoints to work. - - name: "Config" - x-displayName: "Configs" - description: | - Configs are application configurations that can be used by services. Swarm - mode must be enabled for these endpoints to work. - # System things - - name: "Plugin" - x-displayName: "Plugins" - - name: "System" - x-displayName: "System" - -definitions: - Port: - type: "object" - description: "An open port on a container" - required: [PrivatePort, Type] - properties: - IP: - type: "string" - format: "ip-address" - description: "Host IP address that the container's port is mapped to" - PrivatePort: - type: "integer" - format: "uint16" - x-nullable: false - description: "Port on the container" - PublicPort: - type: "integer" - format: "uint16" - description: "Port exposed on the host" - Type: - type: "string" - x-nullable: false - enum: ["tcp", "udp", "sctp"] - example: - PrivatePort: 8080 - PublicPort: 80 - Type: "tcp" - - MountPoint: - type: "object" - description: | - MountPoint represents a mount point configuration inside the container. - This is used for reporting the mountpoints in use by a container. - properties: - Type: - description: | - The mount type: - - - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `image` a docker image - - `tmpfs` a `tmpfs`. - - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "image" - - "tmpfs" - - "npipe" - - "cluster" - example: "volume" - Name: - description: | - Name is the name reference to the underlying data defined by `Source` - e.g., the volume name. - type: "string" - example: "myvolume" - Source: - description: | - Source location of the mount. - - For volumes, this contains the storage location of the volume (within - `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - the source (host) part of the bind-mount. For `tmpfs` mount points, this - field is empty. - type: "string" - example: "/var/lib/docker/volumes/myvolume/_data" - Destination: - description: | - Destination is the path relative to the container root (`/`) where - the `Source` is mounted inside the container. - type: "string" - example: "/usr/share/nginx/html/" - Driver: - description: | - Driver is the volume driver used to create the volume (if it is a volume). - type: "string" - example: "local" - Mode: - description: | - Mode is a comma separated list of options supplied by the user when - creating the bind/volume mount. - - The default is platform-specific (`"z"` on Linux, empty on Windows). - type: "string" - example: "z" - RW: - description: | - Whether the mount is mounted writable (read-write). 
- type: "boolean" - example: true - Propagation: - description: | - Propagation describes how mounts are propagated from the host into the - mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) - for details. This field is not used on Windows. - type: "string" - example: "" - - DeviceMapping: - type: "object" - description: "A device mapping between the host and container" - properties: - PathOnHost: - type: "string" - PathInContainer: - type: "string" - CgroupPermissions: - type: "string" - example: - PathOnHost: "/dev/deviceName" - PathInContainer: "/dev/deviceName" - CgroupPermissions: "mrw" - - DeviceRequest: - type: "object" - description: "A request for devices to be sent to device drivers" - properties: - Driver: - type: "string" - example: "nvidia" - Count: - type: "integer" - example: -1 - DeviceIDs: - type: "array" - items: - type: "string" - example: - - "0" - - "1" - - "GPU-fef8089b-4820-abfc-e83e-94318197576e" - Capabilities: - description: | - A list of capabilities; an OR list of AND lists of capabilities. - type: "array" - items: - type: "array" - items: - type: "string" - example: - # gpu AND nvidia AND compute - - ["gpu", "nvidia", "compute"] - Options: - description: | - Driver-specific options, specified as a key/value pairs. These options - are passed directly to the driver. - type: "object" - additionalProperties: - type: "string" - - ThrottleDevice: - type: "object" - properties: - Path: - description: "Device path" - type: "string" - Rate: - description: "Rate" - type: "integer" - format: "int64" - minimum: 0 - - Mount: - type: "object" - properties: - Target: - description: "Container path." - type: "string" - Source: - description: "Mount source (e.g. a volume name, a host path)." - type: "string" - Type: - description: | - The mount type. Available types: - - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `image` Mounts an image. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "image" - - "tmpfs" - - "npipe" - - "cluster" - ReadOnly: - description: "Whether the mount should be read-only." - type: "boolean" - Consistency: - description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." - type: "string" - BindOptions: - description: "Optional configuration for the `bind` type." - type: "object" - properties: - Propagation: - description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." - type: "string" - enum: - - "private" - - "rprivate" - - "shared" - - "rshared" - - "slave" - - "rslave" - NonRecursive: - description: "Disable recursive bind mount." - type: "boolean" - default: false - CreateMountpoint: - description: "Create mount point on host if missing" - type: "boolean" - default: false - ReadOnlyNonRecursive: - description: | - Make the mount non-recursively read-only, but still leave the mount recursive - (unless NonRecursive is set to `true` in conjunction). 
- - Added in v1.44, before that version all read-only mounts were - non-recursive by default. To match the previous behaviour this - will default to `true` for clients on versions prior to v1.44. - type: "boolean" - default: false - ReadOnlyForceRecursive: - description: "Raise an error if the mount cannot be made recursively read-only." - type: "boolean" - default: false - VolumeOptions: - description: "Optional configuration for the `volume` type." - type: "object" - properties: - NoCopy: - description: "Populate volume with data from the target." - type: "boolean" - default: false - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - DriverConfig: - description: "Map of driver specific options" - type: "object" - properties: - Name: - description: "Name of the driver to use to create the volume." - type: "string" - Options: - description: "key/value map of driver specific options." - type: "object" - additionalProperties: - type: "string" - Subpath: - description: "Source path inside the volume. Must be relative without any back traversals." - type: "string" - example: "dir-inside-volume/subdirectory" - ImageOptions: - description: "Optional configuration for the `image` type." - type: "object" - properties: - Subpath: - description: "Source path inside the image. Must be relative without any back traversals." - type: "string" - example: "dir-inside-image/subdirectory" - TmpfsOptions: - description: "Optional configuration for the `tmpfs` type." - type: "object" - properties: - SizeBytes: - description: "The size for the tmpfs mount in bytes." - type: "integer" - format: "int64" - Mode: - description: "The permission mode for the tmpfs mount in an integer." - type: "integer" - Options: - description: | - The options to be passed to the tmpfs mount. An array of arrays. - Flag options should be provided as 1-length arrays. Other types - should be provided as as 2-length arrays, where the first item is - the key and the second the value. - type: "array" - items: - type: "array" - minItems: 1 - maxItems: 2 - items: - type: "string" - example: - [["noexec"]] - - RestartPolicy: - description: | - The behavior to apply when the container exits. The default is not to - restart. - - An ever increasing delay (double the previous delay, starting at 100ms) is - added before each restart to prevent flooding the server. - type: "object" - properties: - Name: - type: "string" - description: | - - Empty string means not to restart - - `no` Do not automatically restart - - `always` Always restart - - `unless-stopped` Restart always except when the user has manually stopped the container - - `on-failure` Restart only when the container exit code is non-zero - enum: - - "" - - "no" - - "always" - - "unless-stopped" - - "on-failure" - MaximumRetryCount: - type: "integer" - description: | - If `on-failure` is used, the number of times to retry before giving up. - - Resources: - description: "A container's resources (cgroups config, ulimits, etc)" - type: "object" - properties: - # Applicable to all platforms - CpuShares: - description: | - An integer value representing this container's relative CPU weight - versus other containers. - type: "integer" - Memory: - description: "Memory limit in bytes." - type: "integer" - format: "int64" - default: 0 - # Applicable to UNIX platforms - CgroupParent: - description: | - Path to `cgroups` under which the container's `cgroup` is created. 
If - the path is not absolute, the path is considered to be relative to the - `cgroups` path of the init process. Cgroups are created if they do not - already exist. - type: "string" - BlkioWeight: - description: "Block IO weight (relative weight)." - type: "integer" - minimum: 0 - maximum: 1000 - BlkioWeightDevice: - description: | - Block IO weight (relative device weight) in the form: - - ``` - [{"Path": "device_path", "Weight": weight}] - ``` - type: "array" - items: - type: "object" - properties: - Path: - type: "string" - Weight: - type: "integer" - minimum: 0 - BlkioDeviceReadBps: - description: | - Limit read rate (bytes per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteBps: - description: | - Limit write rate (bytes per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceReadIOps: - description: | - Limit read rate (IO per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteIOps: - description: | - Limit write rate (IO per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - CpuPeriod: - description: "The length of a CPU period in microseconds." - type: "integer" - format: "int64" - CpuQuota: - description: | - Microseconds of CPU time that the container can get in a CPU period. - type: "integer" - format: "int64" - CpuRealtimePeriod: - description: | - The length of a CPU real-time period in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpuRealtimeRuntime: - description: | - The length of a CPU real-time runtime in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpusetCpus: - description: | - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - type: "string" - example: "0-3" - CpusetMems: - description: | - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only - effective on NUMA systems. - type: "string" - Devices: - description: "A list of devices to add to the container." - type: "array" - items: - $ref: "#/definitions/DeviceMapping" - DeviceCgroupRules: - description: "a list of cgroup rules to apply to the container" - type: "array" - items: - type: "string" - example: "c 13:* rwm" - DeviceRequests: - description: | - A list of requests for devices to be sent to device drivers. - type: "array" - items: - $ref: "#/definitions/DeviceRequest" - KernelMemoryTCP: - description: | - Hard limit for kernel TCP buffer memory (in bytes). Depending on the - OCI runtime in use, this option may be ignored. It is no longer supported - by the default (runc) runtime. - - This field is omitted when empty. - type: "integer" - format: "int64" - MemoryReservation: - description: "Memory soft limit in bytes." - type: "integer" - format: "int64" - MemorySwap: - description: | - Total memory limit (memory + swap). Set as `-1` to enable unlimited - swap. - type: "integer" - format: "int64" - MemorySwappiness: - description: | - Tune a container's memory swappiness behavior. Accepts an integer - between 0 and 100. 
- type: "integer" - format: "int64" - minimum: 0 - maximum: 100 - NanoCpus: - description: "CPU quota in units of 10-9 CPUs." - type: "integer" - format: "int64" - OomKillDisable: - description: "Disable OOM Killer for the container." - type: "boolean" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - PidsLimit: - description: | - Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` - to not change. - type: "integer" - format: "int64" - x-nullable: true - Ulimits: - description: | - A list of resource limits to set in the container. For example: - - ``` - {"Name": "nofile", "Soft": 1024, "Hard": 2048} - ``` - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - # Applicable to Windows - CpuCount: - description: | - The number of usable CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - CpuPercent: - description: | - The usable percentage of the available CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - IOMaximumIOps: - description: "Maximum IOps for the container system drive (Windows only)" - type: "integer" - format: "int64" - IOMaximumBandwidth: - description: | - Maximum IO in bytes per second for the container system drive - (Windows only). - type: "integer" - format: "int64" - - Limit: - description: | - An object describing a limit on resources which can be requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - Pids: - description: | - Limits the maximum number of PIDs in the container. Set `0` for unlimited. - type: "integer" - format: "int64" - default: 0 - example: 100 - - ResourceObject: - description: | - An object describing the resources which can be advertised by a node and - requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - GenericResources: - $ref: "#/definitions/GenericResources" - - GenericResources: - description: | - User-defined resources can be either Integer resources (e.g, `SSD=3`) or - String resources (e.g, `GPU=UUID1`). - type: "array" - items: - type: "object" - properties: - NamedResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "string" - DiscreteResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "integer" - format: "int64" - example: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - HealthConfig: - description: "A test to perform to check that the container is healthy." - type: "object" - properties: - Test: - description: | - The test to perform. 
Possible values are: - - - `[]` inherit healthcheck from image or parent image - - `["NONE"]` disable healthcheck - - `["CMD", args...]` exec arguments directly - - `["CMD-SHELL", command]` run command with system's default shell - type: "array" - items: - type: "string" - Interval: - description: | - The time to wait between checks in nanoseconds. It should be 0 or at - least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Timeout: - description: | - The time to wait before considering the check to have hung. It should - be 0 or at least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Retries: - description: | - The number of consecutive failures needed to consider a container as - unhealthy. 0 means inherit. - type: "integer" - StartPeriod: - description: | - Start period for the container to initialize before starting - health-retries countdown in nanoseconds. It should be 0 or at least - 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - StartInterval: - description: | - The time to wait between checks in nanoseconds during the start period. - It should be 0 or at least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - - Health: - description: | - Health stores information about the container's healthcheck results. - type: "object" - x-nullable: true - properties: - Status: - description: | - Status is one of `none`, `starting`, `healthy` or `unhealthy` - - - "none" Indicates there is no healthcheck - - "starting" Starting indicates that the container is not yet ready - - "healthy" Healthy indicates that the container is running correctly - - "unhealthy" Unhealthy indicates that the container has a problem - type: "string" - enum: - - "none" - - "starting" - - "healthy" - - "unhealthy" - example: "healthy" - FailingStreak: - description: "FailingStreak is the number of consecutive failures" - type: "integer" - example: 0 - Log: - type: "array" - description: | - Log contains the last few results (oldest first) - items: - $ref: "#/definitions/HealthcheckResult" - - HealthcheckResult: - description: | - HealthcheckResult stores information about a single run of a healthcheck probe - type: "object" - x-nullable: true - properties: - Start: - description: | - Date and time at which this check started in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "date-time" - example: "2020-01-04T10:44:24.496525531Z" - End: - description: | - Date and time at which this check ended in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2020-01-04T10:45:21.364524523Z" - ExitCode: - description: | - ExitCode meanings: - - - `0` healthy - - `1` unhealthy - - `2` reserved (considered unhealthy) - - other values: error running probe - type: "integer" - example: 0 - Output: - description: "Output from last check" - type: "string" - - HostConfig: - description: "Container configuration that depends on the host we are running on" - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - # Applicable to all platforms - Binds: - type: "array" - description: | - A list of volume bindings for this container. Each volume binding - is a string in one of these forms: - - - `host-src:container-dest[:options]` to bind-mount a host path - into the container. Both `host-src`, and `container-dest` must - be an _absolute_ path. 
- - `volume-name:container-dest[:options]` to bind-mount a volume - managed by a volume driver into the container. `container-dest` - must be an _absolute_ path. - - `options` is an optional, comma-delimited list of: - - - `nocopy` disables automatic copying of data from the container - path to the volume. The `nocopy` flag only applies to named volumes. - - `[ro|rw]` mounts a volume read-only or read-write, respectively. - If omitted or set to `rw`, volumes are mounted read-write. - - `[z|Z]` applies SELinux labels to allow or deny multiple containers - to read and write to the same volume. - - `z`: a _shared_ content label is applied to the content. This - label indicates that multiple containers can share the volume - content, for both reading and writing. - - `Z`: a _private unshared_ label is applied to the content. - This label indicates that only the current container can use - a private volume. Labeling systems such as SELinux require - proper labels to be placed on volume content that is mounted - into a container. Without a label, the security system can - prevent a container's processes from using the content. By - default, the labels set by the host operating system are not - modified. - - `[[r]shared|[r]slave|[r]private]` specifies mount - [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). - This only applies to bind-mounted volumes, not internal volumes - or named volumes. Mount propagation requires the source mount - point (the location where the source directory is mounted in the - host operating system) to have the correct propagation properties. - For shared volumes, the source mount point must be set to `shared`. - For slave volumes, the mount must be set to either `shared` or - `slave`. - items: - type: "string" - ContainerIDFile: - type: "string" - description: "Path to a file where the container ID is written" - example: "" - LogConfig: - type: "object" - description: "The logging configuration for this container" - properties: - Type: - description: |- - Name of the logging driver used for the container or "none" - if logging is disabled. - type: "string" - enum: - - "local" - - "json-file" - - "syslog" - - "journald" - - "gelf" - - "fluentd" - - "awslogs" - - "splunk" - - "etwlogs" - - "none" - Config: - description: |- - Driver-specific configuration options for the logging driver. - type: "object" - additionalProperties: - type: "string" - example: - "max-file": "5" - "max-size": "10m" - NetworkMode: - type: "string" - description: | - Network mode to use for this container. Supported standard values - are: `bridge`, `host`, `none`, and `container:`. Any - other value is taken as a custom network's name to which this - container should connect to. - PortBindings: - $ref: "#/definitions/PortMap" - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - AutoRemove: - type: "boolean" - description: | - Automatically remove the container when the container's process - exits. This has no effect if `RestartPolicy` is set. - VolumeDriver: - type: "string" - description: "Driver that this container uses to mount volumes." - VolumesFrom: - type: "array" - description: | - A list of volumes to inherit from another container, specified in - the form `[:]`. - items: - type: "string" - Mounts: - description: | - Specification for mounts to be added to the container. - type: "array" - items: - $ref: "#/definitions/Mount" - ConsoleSize: - type: "array" - description: | - Initial console size, as an `[height, width]` array. 
- x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - example: [80, 64] - Annotations: - type: "object" - description: | - Arbitrary non-identifying metadata attached to container and - provided to the runtime when the container is started. - additionalProperties: - type: "string" - - # Applicable to UNIX platforms - CapAdd: - type: "array" - description: | - A list of kernel capabilities to add to the container. Conflicts - with option 'Capabilities'. - items: - type: "string" - CapDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the container. Conflicts - with option 'Capabilities'. - items: - type: "string" - CgroupnsMode: - type: "string" - enum: - - "private" - - "host" - description: | - cgroup namespace mode for the container. Possible values are: - - - `"private"`: the container runs in its own private cgroup namespace - - `"host"`: use the host system's cgroup namespace - - If not specified, the daemon default is used, which can either be `"private"` - or `"host"`, depending on daemon version, kernel support and configuration. - Dns: - type: "array" - description: "A list of DNS servers for the container to use." - items: - type: "string" - DnsOptions: - type: "array" - description: "A list of DNS options." - items: - type: "string" - DnsSearch: - type: "array" - description: "A list of DNS search domains." - items: - type: "string" - ExtraHosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` - file. Specified in the form `["hostname:IP"]`. - items: - type: "string" - GroupAdd: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - IpcMode: - type: "string" - description: | - IPC sharing mode for the container. Possible values are: - - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace - - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. - Cgroup: - type: "string" - description: "Cgroup to use for the container." - Links: - type: "array" - description: | - A list of links for the container in the form `container_name:alias`. - items: - type: "string" - OomScoreAdj: - type: "integer" - description: | - An integer value containing the score given to the container in - order to tune OOM killer preferences. - example: 500 - PidMode: - type: "string" - description: | - Set the PID (Process) Namespace mode for the container. It can be - either: - - - `"container:"`: joins another container's PID namespace - - `"host"`: use the host's PID namespace inside the container - Privileged: - type: "boolean" - description: |- - Gives the container full access to the host. - PublishAllPorts: - type: "boolean" - description: | - Allocates an ephemeral host port for all of a container's - exposed ports. - - Ports are de-allocated when the container stops and allocated when - the container starts. The allocated port might be changed when - restarting the container. - - The port is selected from the ephemeral port range that depends on - the kernel. For example, on Linux the range is defined by - `/proc/sys/net/ipv4/ip_local_port_range`. 
- ReadonlyRootfs: - type: "boolean" - description: "Mount the container's root filesystem as read only." - SecurityOpt: - type: "array" - description: | - A list of string values to customize labels for MLS systems, such - as SELinux. - items: - type: "string" - StorageOpt: - type: "object" - description: | - Storage driver options for this container, in the form `{"size": "120G"}`. - additionalProperties: - type: "string" - Tmpfs: - type: "object" - description: | - A map of container directories which should be replaced by tmpfs - mounts, and their corresponding mount options. For example: - - ``` - { "/run": "rw,noexec,nosuid,size=65536k" } - ``` - additionalProperties: - type: "string" - UTSMode: - type: "string" - description: "UTS namespace to use for the container." - UsernsMode: - type: "string" - description: | - Sets the usernamespace mode for the container when usernamespace - remapping option is enabled. - ShmSize: - type: "integer" - format: "int64" - description: | - Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. - minimum: 0 - Sysctls: - type: "object" - x-nullable: true - description: |- - A list of kernel parameters (sysctls) to set in the container. - - This field is omitted if not set. - additionalProperties: - type: "string" - example: - "net.ipv4.ip_forward": "1" - Runtime: - type: "string" - x-nullable: true - description: |- - Runtime to use with this container. - # Applicable to Windows - Isolation: - type: "string" - description: | - Isolation technology of the container. (Windows only) - enum: - - "default" - - "process" - - "hyperv" - - "" - MaskedPaths: - type: "array" - description: | - The list of paths to be masked inside the container (this overrides - the default set of paths). - items: - type: "string" - example: - - "/proc/asound" - - "/proc/acpi" - - "/proc/kcore" - - "/proc/keys" - - "/proc/latency_stats" - - "/proc/timer_list" - - "/proc/timer_stats" - - "/proc/sched_debug" - - "/proc/scsi" - - "/sys/firmware" - - "/sys/devices/virtual/powercap" - ReadonlyPaths: - type: "array" - description: | - The list of paths to be set as read-only inside the container - (this overrides the default set of paths). - items: - type: "string" - example: - - "/proc/bus" - - "/proc/fs" - - "/proc/irq" - - "/proc/sys" - - "/proc/sysrq-trigger" - - ContainerConfig: - description: | - Configuration for a container that is portable between hosts. - type: "object" - properties: - Hostname: - description: | - The hostname to use for the container, as a valid RFC 1123 hostname. - type: "string" - example: "439f4e91bd1d" - Domainname: - description: | - The domain name to use for the container. - type: "string" - User: - description: |- - Commands run as this user inside the container. If omitted, commands - run as the user specified in the image the container was started from. - - Can be either user-name or UID, and optional group-name or GID, - separated by a colon (`[<:group-name|GID>]`). - type: "string" - example: "123:456" - AttachStdin: - description: "Whether to attach to `stdin`." - type: "boolean" - default: false - AttachStdout: - description: "Whether to attach to `stdout`." - type: "boolean" - default: true - AttachStderr: - description: "Whether to attach to `stderr`." 
- type: "boolean" - default: true - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - x-nullable: true - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: { - "80/tcp": {}, - "443/tcp": {} - } - Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. - type: "boolean" - default: false - OpenStdin: - description: "Open `stdin`" - type: "boolean" - default: false - StdinOnce: - description: "Close `stdin` after one attached client disconnects" - type: "boolean" - default: false - Env: - description: | - A list of environment variables to set inside the container in the - form `["VAR=value", ...]`. A variable without `=` is removed from the - environment, rather than to have an empty value. - type: "array" - items: - type: "string" - example: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - description: | - Command to run specified as a string or an array of strings. - type: "array" - items: - type: "string" - example: ["/bin/sh"] - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - default: false - example: false - x-nullable: true - Image: - description: | - The name (or reference) of the image to use when creating the container, - or which was used when the container was created. - type: "string" - example: "example-image:1.0" - Volumes: - description: | - An object mapping mount point paths inside the container to empty - objects. - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - example: "/public/" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the - entry point is reset to system default (i.e., the entry point used by - docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - example: [] - NetworkDisabled: - description: "Disable networking for the container." - type: "boolean" - x-nullable: true - MacAddress: - description: | - MAC address of the container. - - Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. - type: "string" - x-nullable: true - OnBuild: - description: | - `ONBUILD` metadata that were defined in the image's `Dockerfile`. - type: "array" - x-nullable: true - items: - type: "string" - example: [] - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - StopSignal: - description: | - Signal to stop a container as a string or unsigned integer. - type: "string" - example: "SIGTERM" - x-nullable: true - StopTimeout: - description: "Timeout to stop a container in seconds." - type: "integer" - default: 10 - x-nullable: true - Shell: - description: | - Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. - type: "array" - x-nullable: true - items: - type: "string" - example: ["/bin/sh", "-c"] - - ImageConfig: - description: | - Configuration of the image. These fields are used as defaults - when starting a container from the image. 
- type: "object" - properties: - User: - description: "The user that commands are run as inside the container." - type: "string" - example: "web:web" - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - x-nullable: true - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: { - "80/tcp": {}, - "443/tcp": {} - } - Env: - description: | - A list of environment variables to set inside the container in the - form `["VAR=value", ...]`. A variable without `=` is removed from the - environment, rather than to have an empty value. - type: "array" - items: - type: "string" - example: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - description: | - Command to run specified as a string or an array of strings. - type: "array" - items: - type: "string" - example: ["/bin/sh"] - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - default: false - example: false - x-nullable: true - Volumes: - description: | - An object mapping mount point paths inside the container to empty - objects. - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: - "/app/data": {} - "/app/config": {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - example: "/public/" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the - entry point is reset to system default (i.e., the entry point used by - docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - example: [] - OnBuild: - description: | - `ONBUILD` metadata that were defined in the image's `Dockerfile`. - type: "array" - x-nullable: true - items: - type: "string" - example: [] - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - StopSignal: - description: | - Signal to stop a container as a string or unsigned integer. - type: "string" - example: "SIGTERM" - x-nullable: true - Shell: - description: | - Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. - type: "array" - x-nullable: true - items: - type: "string" - example: ["/bin/sh", "-c"] - # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. - example: - "User": "web:web" - "ExposedPorts": { - "80/tcp": {}, - "443/tcp": {} - } - "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] - "Cmd": ["/bin/sh"] - "Healthcheck": { - "Test": ["string"], - "Interval": 0, - "Timeout": 0, - "Retries": 0, - "StartPeriod": 0, - "StartInterval": 0 - } - "ArgsEscaped": true - "Volumes": { - "/app/data": {}, - "/app/config": {} - } - "WorkingDir": "/public/" - "Entrypoint": [] - "OnBuild": [] - "Labels": { - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value" - } - "StopSignal": "SIGTERM" - "Shell": ["/bin/sh", "-c"] - - NetworkingConfig: - description: | - NetworkingConfig represents the container's networking configuration for - each of its interfaces. 
- It is used for the networking configs specified in the `docker create` - and `docker network connect` commands. - type: "object" - properties: - EndpointsConfig: - description: | - A mapping of network name to endpoint configuration for that network. - The endpoint configuration can be left empty to connect to that - network with no particular endpoint configuration. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - example: - # putting an example here, instead of using the example values from - # /definitions/EndpointSettings, because EndpointSettings contains - # operational data returned when inspecting a container that we don't - # accept here. - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - MacAddress: "02:42:ac:12:05:02" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - database_nw: {} - - NetworkSettings: - description: "NetworkSettings exposes the network settings in the API" - type: "object" - properties: - Bridge: - description: | - Name of the default bridge interface when dockerd's --bridge flag is set. - type: "string" - example: "docker0" - SandboxID: - description: SandboxID uniquely represents a container's network stack. - type: "string" - example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" - HairpinMode: - description: | - Indicates if hairpin NAT should be enabled on the virtual interface. - - Deprecated: This field is never set and will be removed in a future release. - type: "boolean" - example: false - LinkLocalIPv6Address: - description: | - IPv6 unicast address using the link-local prefix. - - Deprecated: This field is never set and will be removed in a future release. - type: "string" - example: "" - LinkLocalIPv6PrefixLen: - description: | - Prefix length of the IPv6 unicast address. - - Deprecated: This field is never set and will be removed in a future release. - type: "integer" - example: "" - Ports: - $ref: "#/definitions/PortMap" - SandboxKey: - description: SandboxKey is the full path of the netns handle - type: "string" - example: "/var/run/docker/netns/8ab54b426c38" - - SecondaryIPAddresses: - description: "Deprecated: This field is never set and will be removed in a future release." - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - SecondaryIPv6Addresses: - description: "Deprecated: This field is never set and will be removed in a future release." - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO properties below are part of DefaultNetworkSettings, which is - # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 - EndpointID: - description: | - EndpointID uniquely represents a service endpoint in a Sandbox. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.1" - GlobalIPv6Address: - description: | - Global IPv6 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 64 - IPAddress: - description: | - IPv4 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address for this network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8:2::100" - MacAddress: - description: | - MAC address for the container on the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "02:42:ac:11:00:04" - Networks: - description: | - Information about all networks that the container is connected to. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - - Address: - description: Address represents an IPv4 or IPv6 IP address. - type: "object" - properties: - Addr: - description: IP address. - type: "string" - PrefixLen: - description: Mask length of the IP address. - type: "integer" - - PortMap: - description: | - PortMap describes the mapping of container ports to host ports, using the - container's port-number and protocol as key in the format `/`, - for example, `80/udp`. - - If a container's port is mapped for multiple protocols, separate entries - are added to the mapping table. - type: "object" - additionalProperties: - type: "array" - x-nullable: true - items: - $ref: "#/definitions/PortBinding" - example: - "443/tcp": - - HostIp: "127.0.0.1" - HostPort: "4443" - "80/tcp": - - HostIp: "0.0.0.0" - HostPort: "80" - - HostIp: "0.0.0.0" - HostPort: "8080" - "80/udp": - - HostIp: "0.0.0.0" - HostPort: "80" - "53/udp": - - HostIp: "0.0.0.0" - HostPort: "53" - "2377/tcp": null - - PortBinding: - description: | - PortBinding represents a binding between a host IP address and a host - port. - type: "object" - properties: - HostIp: - description: "Host IP address that the container's port is mapped to." - type: "string" - example: "127.0.0.1" - HostPort: - description: "Host port number that the container's port is mapped to." - type: "string" - example: "4443" - - DriverData: - description: | - Information about the storage driver used to store the container's and - image's filesystem. - type: "object" - required: [Name, Data] - properties: - Name: - description: "Name of the storage driver." - type: "string" - x-nullable: false - example: "overlay2" - Data: - description: | - Low-level storage metadata, provided as key/value pairs. - - This information is driver-specific, and depends on the storage-driver - in use, and should be used for informational purposes only. - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: { - "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", - "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", - "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" - } - - FilesystemChange: - description: | - Change in the container's filesystem. - type: "object" - required: [Path, Kind] - properties: - Path: - description: | - Path to file or directory that has changed. - type: "string" - x-nullable: false - Kind: - $ref: "#/definitions/ChangeType" - - ChangeType: - description: | - Kind of change - - Can be one of: - - - `0`: Modified ("C") - - `1`: Added ("A") - - `2`: Deleted ("D") - type: "integer" - format: "uint8" - enum: [0, 1, 2] - x-nullable: false - - ImageInspect: - description: | - Information about an image in the local image cache. - type: "object" - properties: - Id: - description: | - ID is the content-addressable ID of an image. 
- - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - Descriptor: - description: | - Descriptor is an OCI descriptor of the image target. - In case of a multi-platform image, this descriptor points to the OCI index - or a manifest list. - - This field is only present if the daemon provides a multi-platform image store. - - WARNING: This is experimental and may change at any time without any backward - compatibility. - x-nullable: true - $ref: "#/definitions/OCIDescriptor" - Manifests: - description: | - Manifests is a list of image manifests available in this image. It - provides a more detailed view of the platform-specific image manifests or - other image-attached data like build attestations. - - Only available if the daemon provides a multi-platform image store - and the `manifests` option is set in the inspect request. - - WARNING: This is experimental and may change at any time without any backward - compatibility. - type: "array" - x-nullable: true - items: - $ref: "#/definitions/ImageManifestSummary" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. - - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. - type: "array" - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Parent: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - Comment: - description: | - Optional message that was set when committing or importing the image. - type: "string" - x-nullable: false - example: "" - Created: - description: | - Date and time at which the image was created, formatted in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - - This information is only available if present in the image, - and omitted otherwise. - type: "string" - format: "dateTime" - x-nullable: true - example: "2022-02-04T21:20:12.497794809Z" - DockerVersion: - description: | - The version of Docker that was used to build the image. - - Depending on how the image was created, this field may be empty. 
- type: "string" - x-nullable: false - example: "27.0.1" - Author: - description: | - Name of the author that was specified when committing the image, or as - specified through MAINTAINER (deprecated) in the Dockerfile. - type: "string" - x-nullable: false - example: "" - Config: - $ref: "#/definitions/ImageConfig" - Architecture: - description: | - Hardware CPU architecture that the image runs on. - type: "string" - x-nullable: false - example: "arm" - Variant: - description: | - CPU architecture variant (presently ARM-only). - type: "string" - x-nullable: true - example: "v7" - Os: - description: | - Operating System the image is built to run on. - type: "string" - x-nullable: false - example: "linux" - OsVersion: - description: | - Operating System version the image is built to run on (especially - for Windows). - type: "string" - example: "" - x-nullable: true - Size: - description: | - Total size of the image including all layers it is composed of. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 - GraphDriver: - $ref: "#/definitions/DriverData" - RootFS: - description: | - Information about the image's RootFS, including the layer IDs. - type: "object" - required: [Type] - properties: - Type: - type: "string" - x-nullable: false - example: "layers" - Layers: - type: "array" - items: - type: "string" - example: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - Metadata: - description: | - Additional metadata of the image in the local cache. This information - is local to the daemon, and not part of the image itself. - type: "object" - properties: - LastTagTime: - description: | - Date and time at which the image was last tagged in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - - This information is only available if the image was tagged locally, - and omitted otherwise. - type: "string" - format: "dateTime" - example: "2022-02-28T14:40:02.623929178Z" - x-nullable: true - - ImageSummary: - type: "object" - x-go-name: "Summary" - required: - - Id - - ParentId - - RepoTags - - RepoDigests - - Created - - Size - - SharedSize - - Labels - - Containers - properties: - Id: - description: | - ID is the content-addressable ID of an image. - - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - ParentId: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. 
- - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - x-nullable: false - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. - type: "array" - x-nullable: false - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Created: - description: | - Date and time at which the image was created as a Unix timestamp - (number of seconds since EPOCH). - type: "integer" - x-nullable: false - example: "1644009612" - Size: - description: | - Total size of the image including all layers it is composed of. - type: "integer" - format: "int64" - x-nullable: false - example: 172064416 - SharedSize: - description: | - Total size of image layers that are shared between this image and other - images. - - This size is not calculated by default. `-1` indicates that the value - has not been set / calculated. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 - Labels: - description: "User-defined key/value metadata." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Containers: - description: | - Number of containers using this image. Includes both stopped and running - containers. - - This size is not calculated by default, and depends on which API endpoint - is used. `-1` indicates that the value has not been set / calculated. - x-nullable: false - type: "integer" - example: 2 - Manifests: - description: | - Manifests is a list of manifests available in this image. - It provides a more detailed view of the platform-specific image manifests - or other image-attached data like build attestations. - - WARNING: This is experimental and may change at any time without any backward - compatibility. - type: "array" - x-nullable: false - x-omitempty: true - items: - $ref: "#/definitions/ImageManifestSummary" - Descriptor: - description: | - Descriptor is an OCI descriptor of the image target. - In case of a multi-platform image, this descriptor points to the OCI index - or a manifest list. - - This field is only present if the daemon provides a multi-platform image store. - - WARNING: This is experimental and may change at any time without any backward - compatibility. 
- x-nullable: true - $ref: "#/definitions/OCIDescriptor" - - AuthConfig: - type: "object" - properties: - username: - type: "string" - password: - type: "string" - email: - type: "string" - serveraddress: - type: "string" - example: - username: "hannibal" - password: "xxxx" - serveraddress: "https://index.docker.io/v1/" - - ProcessConfig: - type: "object" - properties: - privileged: - type: "boolean" - user: - type: "string" - tty: - type: "boolean" - entrypoint: - type: "string" - arguments: - type: "array" - items: - type: "string" - - Volume: - type: "object" - required: [Name, Driver, Mountpoint, Labels, Scope, Options] - properties: - Name: - type: "string" - description: "Name of the volume." - x-nullable: false - example: "tardis" - Driver: - type: "string" - description: "Name of the volume driver used by the volume." - x-nullable: false - example: "custom" - Mountpoint: - type: "string" - description: "Mount path of the volume on the host." - x-nullable: false - example: "/var/lib/docker/volumes/tardis" - CreatedAt: - type: "string" - format: "dateTime" - description: "Date/Time the volume was created." - example: "2016-06-07T20:31:11.853781916Z" - Status: - type: "object" - description: | - Low-level details about the volume, provided by the volume driver. - Details are returned as a map with key/value pairs: - `{"key":"value","key2":"value2"}`. - - The `Status` field is optional, and is omitted if the volume driver - does not support this feature. - additionalProperties: - type: "object" - example: - hello: "world" - Labels: - type: "object" - description: "User-defined key/value metadata." - x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: - type: "string" - description: | - The level at which the volume exists. Either `global` for cluster-wide, - or `local` for machine level. - default: "local" - x-nullable: false - enum: ["local", "global"] - example: "local" - ClusterVolume: - $ref: "#/definitions/ClusterVolume" - Options: - type: "object" - description: | - The driver specific options used when creating the volume. - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - UsageData: - type: "object" - x-nullable: true - x-go-name: "UsageData" - required: [Size, RefCount] - description: | - Usage details about the volume. This information is used by the - `GET /system/df` endpoint, and omitted in other endpoints. - properties: - Size: - type: "integer" - format: "int64" - default: -1 - description: | - Amount of disk space used by the volume (in bytes). This information - is only available for volumes created with the `"local"` volume - driver. For volumes created with other volume drivers, this field - is set to `-1` ("not available") - x-nullable: false - RefCount: - type: "integer" - format: "int64" - default: -1 - description: | - The number of containers referencing this volume. This field - is set to `-1` if the reference-count is not available. - x-nullable: false - - VolumeCreateOptions: - description: "Volume configuration" - type: "object" - title: "VolumeConfig" - x-go-name: "CreateOptions" - properties: - Name: - description: | - The new volume's name. If not specified, Docker generates a name. - type: "string" - x-nullable: false - example: "tardis" - Driver: - description: "Name of the volume driver to use." 
- type: "string" - default: "local" - x-nullable: false - example: "custom" - DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. - type: "object" - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - ClusterVolumeSpec: - $ref: "#/definitions/ClusterVolumeSpec" - - VolumeListResponse: - type: "object" - title: "VolumeListResponse" - x-go-name: "ListResponse" - description: "Volume list response" - properties: - Volumes: - type: "array" - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - description: | - Warnings that occurred when fetching the list of volumes. - items: - type: "string" - example: [] - - Network: - type: "object" - properties: - Name: - description: | - Name of the network. - type: "string" - example: "my_network" - Id: - description: | - ID that uniquely identifies a network on a single machine. - type: "string" - example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: - description: | - Date and time at which the network was created in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-10-19T04:33:30.360899459Z" - Scope: - description: | - The level at which the network exists (e.g. `swarm` for cluster-wide - or `local` for machine level) - type: "string" - example: "local" - Driver: - description: | - The name of the driver used to create the network (e.g. `bridge`, - `overlay`). - type: "string" - example: "overlay" - EnableIPv4: - description: | - Whether the network was created with IPv4 enabled. - type: "boolean" - example: true - EnableIPv6: - description: | - Whether the network was created with IPv6 enabled. - type: "boolean" - example: false - IPAM: - $ref: "#/definitions/IPAM" - Internal: - description: | - Whether the network is created to only allow internal networking - connectivity. - type: "boolean" - default: false - example: false - Attachable: - description: | - Whether a global / swarm scope network is manually attachable by regular - containers from workers in swarm mode. - type: "boolean" - default: false - example: false - Ingress: - description: | - Whether the network is providing the routing-mesh for the swarm cluster. - type: "boolean" - default: false - example: false - ConfigFrom: - $ref: "#/definitions/ConfigReference" - ConfigOnly: - description: | - Whether the network is a config-only network. Config-only networks are - placeholder networks for network configurations to be used by other - networks. Config-only networks cannot be used directly to run containers - or services. - type: "boolean" - default: false - Containers: - description: | - Contains endpoints attached to the network. - type: "object" - additionalProperties: - $ref: "#/definitions/NetworkContainer" - example: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - description: | - Network-specific options uses when creating the network. 
- type: "object" - additionalProperties: - type: "string" - example: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Peers: - description: | - List of peer nodes for an overlay network. This field is only present - for overlay networks, and omitted for other network types. - type: "array" - items: - $ref: "#/definitions/PeerInfo" - x-nullable: true - # TODO: Add Services (only present when "verbose" is set). - - ConfigReference: - description: | - The config-only network source to provide the configuration for - this network. - type: "object" - properties: - Network: - description: | - The name of the config-only network that provides the network's - configuration. The specified network must be an existing config-only - network. Only network names are allowed, not network IDs. - type: "string" - example: "config_only_network_01" - - IPAM: - type: "object" - properties: - Driver: - description: "Name of the IPAM driver to use." - type: "string" - default: "default" - example: "default" - Config: - description: | - List of IPAM configuration options, specified as a map: - - ``` - {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } - ``` - type: "array" - items: - $ref: "#/definitions/IPAMConfig" - Options: - description: "Driver-specific options, specified as a map." - type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - - IPAMConfig: - type: "object" - properties: - Subnet: - type: "string" - example: "172.20.0.0/16" - IPRange: - type: "string" - example: "172.20.10.0/24" - Gateway: - type: "string" - example: "172.20.10.11" - AuxiliaryAddresses: - type: "object" - additionalProperties: - type: "string" - - NetworkContainer: - type: "object" - properties: - Name: - type: "string" - example: "container_1" - EndpointID: - type: "string" - example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: - type: "string" - example: "02:42:ac:13:00:02" - IPv4Address: - type: "string" - example: "172.19.0.2/16" - IPv6Address: - type: "string" - example: "" - - PeerInfo: - description: | - PeerInfo represents one peer of an overlay network. - type: "object" - properties: - Name: - description: - ID of the peer-node in the Swarm cluster. - type: "string" - example: "6869d7c1732b" - IP: - description: - IP-address of the peer-node in the Swarm cluster. - type: "string" - example: "10.133.77.91" - - NetworkCreateResponse: - description: "OK response to NetworkCreate operation" - type: "object" - title: "NetworkCreateResponse" - x-go-name: "CreateResponse" - required: [Id, Warning] - properties: - Id: - description: "The ID of the created network." - type: "string" - x-nullable: false - example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" - Warning: - description: "Warnings encountered when creating the container" - type: "string" - x-nullable: false - example: "" - - BuildInfo: - type: "object" - properties: - id: - type: "string" - stream: - type: "string" - error: - type: "string" - x-nullable: true - description: |- - errors encountered during the operation. 
- - - > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - x-nullable: true - description: |- - Progress is a pre-formatted presentation of progressDetail. - - - > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. - progressDetail: - $ref: "#/definitions/ProgressDetail" - aux: - $ref: "#/definitions/ImageID" - - BuildCache: - type: "object" - description: | - BuildCache contains information about a build cache record. - properties: - ID: - type: "string" - description: | - Unique ID of the build cache record. - example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" - Parents: - description: | - List of parent build cache record IDs. - type: "array" - items: - type: "string" - x-nullable: true - example: ["hw53o5aio51xtltp5xjp8v7fx"] - Type: - type: "string" - description: | - Cache record type. - example: "regular" - # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 - enum: - - "internal" - - "frontend" - - "source.local" - - "source.git.checkout" - - "exec.cachemount" - - "regular" - Description: - type: "string" - description: | - Description of the build-step that produced the build cache. - example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: - type: "boolean" - description: | - Indicates if the build cache is in use. - example: false - Shared: - type: "boolean" - description: | - Indicates if the build cache is shared. - example: true - Size: - description: | - Amount of disk space used by the build cache (in bytes). - type: "integer" - example: 51 - CreatedAt: - description: | - Date and time at which the build cache was created in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - LastUsedAt: - description: | - Date and time at which the build cache was last used in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - x-nullable: true - example: "2017-08-09T07:09:37.632105588Z" - UsageCount: - type: "integer" - example: 26 - - ImageID: - type: "object" - description: "Image ID or Digest" - properties: - ID: - type: "string" - example: - ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - - CreateImageInfo: - type: "object" - properties: - id: - type: "string" - error: - type: "string" - x-nullable: true - description: |- - errors encountered during the operation. - - - > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - x-nullable: true - description: |- - Progress is a pre-formatted presentation of progressDetail. - - - > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. 
- progressDetail: - $ref: "#/definitions/ProgressDetail" - - PushImageInfo: - type: "object" - properties: - error: - type: "string" - x-nullable: true - description: |- - errors encountered during the operation. - - - > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - x-nullable: true - description: |- - Progress is a pre-formatted presentation of progressDetail. - - - > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. - progressDetail: - $ref: "#/definitions/ProgressDetail" - - DeviceInfo: - type: "object" - description: | - DeviceInfo represents a device that can be used by a container. - properties: - Source: - type: "string" - example: "cdi" - description: | - The origin device driver. - ID: - type: "string" - example: "vendor.com/gpu=0" - description: | - The unique identifier for the device within its source driver. - For CDI devices, this would be an FQDN like "vendor.com/gpu=0". - - ErrorDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "string" - - ProgressDetail: - type: "object" - properties: - current: - type: "integer" - total: - type: "integer" - - ErrorResponse: - description: "Represents an error." - type: "object" - required: ["message"] - properties: - message: - description: "The error message." - type: "string" - x-nullable: false - example: - message: "Something went wrong." - - IDResponse: - description: "Response to an API call that returns just an Id" - type: "object" - x-go-name: "IDResponse" - required: ["Id"] - properties: - Id: - description: "The id of the newly created object." - type: "string" - x-nullable: false - - EndpointSettings: - description: "Configuration for a network endpoint." - type: "object" - properties: - # Configurations - IPAMConfig: - $ref: "#/definitions/EndpointIPAMConfig" - Links: - type: "array" - items: - type: "string" - example: - - "container_1" - - "container_2" - MacAddress: - description: | - MAC address for the endpoint on this network. The network driver might ignore this parameter. - type: "string" - example: "02:42:ac:11:00:04" - Aliases: - type: "array" - items: - type: "string" - example: - - "server_x" - - "server_y" - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - GwPriority: - description: | - This property determines which endpoint will provide the default - gateway for a container. The endpoint with the highest priority will - be used. If multiple endpoints have the same priority, endpoints are - lexicographically sorted based on their network name, and the one - that sorts first is picked. - type: "number" - example: - - 10 - - # Operational data - NetworkID: - description: | - Unique ID of the network. - type: "string" - example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" - EndpointID: - description: | - Unique ID for the service endpoint in a Sandbox. 
- type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for this network. - type: "string" - example: "172.17.0.1" - IPAddress: - description: | - IPv4 address. - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address. - type: "string" - example: "2001:db8:2::100" - GlobalIPv6Address: - description: | - Global IPv6 address. - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - type: "integer" - format: "int64" - example: 64 - DNSNames: - description: | - List of all DNS names an endpoint has on a specific network. This - list is based on the container name, network aliases, container short - ID, and hostname. - - These DNS names are non-fully qualified but can contain several dots. - You can get fully qualified DNS names by appending `.`. - For instance, if container name is `my.ctr` and the network is named - `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be - `my.ctr.testnet`. - type: array - items: - type: string - example: ["foobar", "server_x", "server_y", "my.ctr"] - - EndpointIPAMConfig: - description: | - EndpointIPAMConfig represents an endpoint's IPAM configuration. - type: "object" - x-nullable: true - properties: - IPv4Address: - type: "string" - example: "172.20.30.33" - IPv6Address: - type: "string" - example: "2001:db8:abcd::3033" - LinkLocalIPs: - type: "array" - items: - type: "string" - example: - - "169.254.34.68" - - "fe80::3468" - - PluginMount: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Source, Destination, Type, Options] - properties: - Name: - type: "string" - x-nullable: false - example: "some-mount" - Description: - type: "string" - x-nullable: false - example: "This is a mount that's used by the plugin." - Settable: - type: "array" - items: - type: "string" - Source: - type: "string" - example: "/var/lib/docker/plugins/" - Destination: - type: "string" - x-nullable: false - example: "/mnt/state" - Type: - type: "string" - x-nullable: false - example: "bind" - Options: - type: "array" - items: - type: "string" - example: - - "rbind" - - "rw" - - PluginDevice: - type: "object" - required: [Name, Description, Settable, Path] - x-nullable: false - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Path: - type: "string" - example: "/dev/fuse" - - PluginEnv: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "string" - - PluginInterfaceType: - type: "object" - x-nullable: false - required: [Prefix, Capability, Version] - properties: - Prefix: - type: "string" - x-nullable: false - Capability: - type: "string" - x-nullable: false - Version: - type: "string" - x-nullable: false - - PluginPrivilege: - description: | - Describes a permission the user has to accept upon installing - the plugin. 
- type: "object" - x-go-name: "PluginPrivilege" - properties: - Name: - type: "string" - example: "network" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - "host" - - Plugin: - description: "A plugin for the Engine API" - type: "object" - required: [Settings, Enabled, Config, Name] - properties: - Id: - type: "string" - example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: - type: "string" - x-nullable: false - example: "tiborvass/sample-volume-plugin" - Enabled: - description: - True if the plugin is running. False if the plugin is not running, - only installed. - type: "boolean" - x-nullable: false - example: true - Settings: - description: "Settings that can be modified by users." - type: "object" - x-nullable: false - required: [Args, Devices, Env, Mounts] - properties: - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - type: "string" - example: - - "DEBUG=0" - Args: - type: "array" - items: - type: "string" - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PluginReference: - description: "plugin remote reference used to push/pull the plugin" - type: "string" - x-nullable: false - example: "localhost:5000/tiborvass/sample-volume-plugin:latest" - Config: - description: "The config of a plugin." - type: "object" - x-nullable: false - required: - - Description - - Documentation - - Interface - - Entrypoint - - WorkDir - - Network - - Linux - - PidHost - - PropagatedMount - - IpcHost - - Mounts - - Env - - Args - properties: - DockerVersion: - description: "Docker Version used to create the plugin" - type: "string" - x-nullable: false - example: "17.06.0-ce" - Description: - type: "string" - x-nullable: false - example: "A sample volume plugin for Docker" - Documentation: - type: "string" - x-nullable: false - example: "https://docs.docker.com/engine/extend/plugins/" - Interface: - description: "The interface between Docker and the plugin" - x-nullable: false - type: "object" - required: [Types, Socket] - properties: - Types: - type: "array" - items: - $ref: "#/definitions/PluginInterfaceType" - example: - - "docker.volumedriver/1.0" - Socket: - type: "string" - x-nullable: false - example: "plugins.sock" - ProtocolScheme: - type: "string" - example: "some.protocol/v1.0" - description: "Protocol to use for clients connecting to the plugin." 
- enum: - - "" - - "moby.plugins.http/v1" - Entrypoint: - type: "array" - items: - type: "string" - example: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: - type: "string" - x-nullable: false - example: "/bin/" - User: - type: "object" - x-nullable: false - properties: - UID: - type: "integer" - format: "uint32" - example: 1000 - GID: - type: "integer" - format: "uint32" - example: 1000 - Network: - type: "object" - x-nullable: false - required: [Type] - properties: - Type: - x-nullable: false - type: "string" - example: "host" - Linux: - type: "object" - x-nullable: false - required: [Capabilities, AllowAllDevices, Devices] - properties: - Capabilities: - type: "array" - items: - type: "string" - example: - - "CAP_SYS_ADMIN" - - "CAP_SYSLOG" - AllowAllDevices: - type: "boolean" - x-nullable: false - example: false - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PropagatedMount: - type: "string" - x-nullable: false - example: "/mnt/volumes" - IpcHost: - type: "boolean" - x-nullable: false - example: false - PidHost: - type: "boolean" - x-nullable: false - example: false - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - $ref: "#/definitions/PluginEnv" - example: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - example: "args" - Description: - x-nullable: false - type: "string" - example: "command line arguments" - Settable: - type: "array" - items: - type: "string" - Value: - type: "array" - items: - type: "string" - rootfs: - type: "object" - properties: - type: - type: "string" - example: "layers" - diff_ids: - type: "array" - items: - type: "string" - example: - - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - - ObjectVersion: - description: | - The version number of the object such as node, service, etc. This is needed - to avoid conflicting writes. The client must send the version number along - with the modified specification when updating these objects. - - This approach ensures safe concurrency and determinism in that the change - on the object may not be applied if the version number has changed from the - last read. In other words, if two update requests specify the same base - version, only one of the requests can succeed. As a result, two separate - update requests that happen at the same time will not unintentionally - overwrite each other. - type: "object" - properties: - Index: - type: "integer" - format: "uint64" - example: 373531 - - NodeSpec: - type: "object" - properties: - Name: - description: "Name for the node." - type: "string" - example: "my-node" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Role: - description: "Role of the node." - type: "string" - enum: - - "worker" - - "manager" - example: "manager" - Availability: - description: "Availability of the node." 
- type: "string" - enum: - - "active" - - "pause" - - "drain" - example: "active" - example: - Availability: "active" - Name: "node-name" - Role: "manager" - Labels: - foo: "bar" - - Node: - type: "object" - properties: - ID: - type: "string" - example: "24ifsmvkjbyhk" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the node was added to the swarm in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the node was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/NodeSpec" - Description: - $ref: "#/definitions/NodeDescription" - Status: - $ref: "#/definitions/NodeStatus" - ManagerStatus: - $ref: "#/definitions/ManagerStatus" - - NodeDescription: - description: | - NodeDescription encapsulates the properties of the Node as reported by the - agent. - type: "object" - properties: - Hostname: - type: "string" - example: "bf3067039e47" - Platform: - $ref: "#/definitions/Platform" - Resources: - $ref: "#/definitions/ResourceObject" - Engine: - $ref: "#/definitions/EngineDescription" - TLSInfo: - $ref: "#/definitions/TLSInfo" - - Platform: - description: | - Platform represents the platform (Arch/OS). - type: "object" - properties: - Architecture: - description: | - Architecture represents the hardware architecture (for example, - `x86_64`). - type: "string" - example: "x86_64" - OS: - description: | - OS represents the Operating System (for example, `linux` or `windows`). - type: "string" - example: "linux" - - EngineDescription: - description: "EngineDescription provides information about an engine." - type: "object" - properties: - EngineVersion: - type: "string" - example: "17.06.0" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - example: - - Type: "Log" - Name: "awslogs" - - Type: "Log" - Name: "fluentd" - - Type: "Log" - Name: "gcplogs" - - Type: "Log" - Name: "gelf" - - Type: "Log" - Name: "journald" - - Type: "Log" - Name: "json-file" - - Type: "Log" - Name: "splunk" - - Type: "Log" - Name: "syslog" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "host" - - Type: "Network" - Name: "ipvlan" - - Type: "Network" - Name: "macvlan" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" - - Type: "Volume" - Name: "local" - - Type: "Volume" - Name: "localhost:5000/vieux/sshfs:latest" - - Type: "Volume" - Name: "vieux/sshfs:latest" - - TLSInfo: - description: | - Information about the issuer of leaf TLS certificates and the trusted root - CA certificate. - type: "object" - properties: - TrustRoot: - description: | - The root CA certificate(s) that are used to validate leaf TLS - certificates. - type: "string" - CertIssuerSubject: - description: - The base64-url-safe-encoded raw subject bytes of the issuer. - type: "string" - CertIssuerPublicKey: - description: | - The base64-url-safe-encoded raw public key bytes of the issuer. 
- type: "string" - example: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" - - NodeStatus: - description: | - NodeStatus represents the status of a node. - - It provides the current status of the node, as seen by the manager. - type: "object" - properties: - State: - $ref: "#/definitions/NodeState" - Message: - type: "string" - example: "" - Addr: - description: "IP address of the node." - type: "string" - example: "172.17.0.2" - - NodeState: - description: "NodeState represents the state of a node." - type: "string" - enum: - - "unknown" - - "down" - - "ready" - - "disconnected" - example: "ready" - - ManagerStatus: - description: | - ManagerStatus represents the status of a manager. - - It provides the current status of a node's manager component, if the node - is a manager. - x-nullable: true - type: "object" - properties: - Leader: - type: "boolean" - default: false - example: true - Reachability: - $ref: "#/definitions/Reachability" - Addr: - description: | - The IP address and port at which the manager is reachable. - type: "string" - example: "10.0.0.46:2377" - - Reachability: - description: "Reachability represents the reachability of a node." - type: "string" - enum: - - "unknown" - - "unreachable" - - "reachable" - example: "reachable" - - SwarmSpec: - description: "User modifiable swarm configuration." - type: "object" - properties: - Name: - description: "Name of the swarm." - type: "string" - example: "default" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.corp.type: "production" - com.example.corp.department: "engineering" - Orchestration: - description: "Orchestration configuration." - type: "object" - x-nullable: true - properties: - TaskHistoryRetentionLimit: - description: | - The number of historic tasks to keep per instance or node. If - negative, never remove completed or failed tasks. - type: "integer" - format: "int64" - example: 10 - Raft: - description: "Raft configuration." - type: "object" - properties: - SnapshotInterval: - description: "The number of log entries between snapshots." - type: "integer" - format: "uint64" - example: 10000 - KeepOldSnapshots: - description: | - The number of snapshots to keep beyond the current snapshot. - type: "integer" - format: "uint64" - LogEntriesForSlowFollowers: - description: | - The number of log entries to keep around to sync up slow followers - after a snapshot is created. - type: "integer" - format: "uint64" - example: 500 - ElectionTick: - description: | - The number of ticks that a follower will wait for a message from - the leader before becoming a candidate and starting an election. - `ElectionTick` must be greater than `HeartbeatTick`. 
- - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 3 - HeartbeatTick: - description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, - the leader will send a heartbeat to the followers. - - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 1 - Dispatcher: - description: "Dispatcher configuration." - type: "object" - x-nullable: true - properties: - HeartbeatPeriod: - description: | - The delay for an agent to send a heartbeat to the dispatcher. - type: "integer" - format: "int64" - example: 5000000000 - CAConfig: - description: "CA configuration." - type: "object" - x-nullable: true - properties: - NodeCertExpiry: - description: "The duration node certificates are issued for." - type: "integer" - format: "int64" - example: 7776000000000000 - ExternalCAs: - description: | - Configuration for forwarding signing requests to an external - certificate authority. - type: "array" - items: - type: "object" - properties: - Protocol: - description: | - Protocol for communication with the external CA (currently - only `cfssl` is supported). - type: "string" - enum: - - "cfssl" - default: "cfssl" - URL: - description: | - URL where certificate signing requests should be sent. - type: "string" - Options: - description: | - An object with key/value pairs that are interpreted as - protocol-specific options for the external CA driver. - type: "object" - additionalProperties: - type: "string" - CACert: - description: | - The root CA certificate (in PEM format) this external CA uses - to issue TLS certificates (assumed to be to the current swarm - root CA certificate if not provided). - type: "string" - SigningCACert: - description: | - The desired signing CA certificate for all swarm node TLS leaf - certificates, in PEM format. - type: "string" - SigningCAKey: - description: | - The desired signing CA key for all swarm node TLS leaf certificates, - in PEM format. - type: "string" - ForceRotate: - description: | - An integer whose purpose is to force swarm to generate a new - signing CA certificate and key, if none have been specified in - `SigningCACert` and `SigningCAKey` - format: "uint64" - type: "integer" - EncryptionConfig: - description: "Parameters related to encryption-at-rest." - type: "object" - properties: - AutoLockManagers: - description: | - If set, generate a key and use it to lock data stored on the - managers. - type: "boolean" - example: false - TaskDefaults: - description: "Defaults for creating tasks in this cluster." - type: "object" - properties: - LogDriver: - description: | - The log driver to use for tasks created in the orchestrator if - unspecified by a service. - - Updating this value only affects new tasks. Existing tasks continue - to use their previously configured log driver until recreated. - type: "object" - properties: - Name: - description: | - The log driver to use as a default for new tasks. - type: "string" - example: "json-file" - Options: - description: | - Driver-specific options for the selected log driver, specified - as key/value pairs. - type: "object" - additionalProperties: - type: "string" - example: - "max-file": "10" - "max-size": "100m" - - # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but - # without `JoinTokens`. 
- ClusterInfo: - description: | - ClusterInfo represents information about the swarm as is returned by the - "/info" endpoint. Join-tokens are not included. - x-nullable: true - type: "object" - properties: - ID: - description: "The ID of the swarm." - type: "string" - example: "abajmipo7b4xz5ip2nrla6b11" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the swarm was initialised in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the swarm was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/SwarmSpec" - TLSInfo: - $ref: "#/definitions/TLSInfo" - RootRotationInProgress: - description: | - Whether there is currently a root CA rotation in progress for the swarm - type: "boolean" - example: false - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - If no port is set or is set to 0, the default port (4789) is used. - type: "integer" - format: "uint32" - default: 4789 - example: 4789 - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global scope - networks. - type: "array" - items: - type: "string" - format: "CIDR" - example: ["10.10.0.0/16", "20.20.0.0/16"] - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created from the - default subnet pool. - type: "integer" - format: "uint32" - maximum: 29 - default: 24 - example: 24 - - JoinTokens: - description: | - JoinTokens contains the tokens workers and managers need to join the swarm. - type: "object" - properties: - Worker: - description: | - The token workers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: - description: | - The token managers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - - Swarm: - type: "object" - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - $ref: "#/definitions/JoinTokens" - - TaskSpec: - description: "User modifiable task configuration." - type: "object" - properties: - PluginSpec: - type: "object" - description: | - Plugin spec for the service. *(Experimental release only.)* - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Name: - description: "The name or 'alias' to use for the plugin." - type: "string" - Remote: - description: "The plugin image reference to use." - type: "string" - Disabled: - description: "Disable the plugin once scheduled." - type: "boolean" - PluginPrivilege: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - ContainerSpec: - type: "object" - description: | - Container spec for the service. - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Image: - description: "The image name to use for the container" - type: "string" - Labels: - description: "User-defined key/value data." - type: "object" - additionalProperties: - type: "string" - Command: - description: "The command to be run in the image." - type: "array" - items: - type: "string" - Args: - description: "Arguments to the command." - type: "array" - items: - type: "string" - Hostname: - description: | - The hostname to use for the container, as a valid - [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. - type: "string" - Env: - description: | - A list of environment variables in the form `VAR=value`. - type: "array" - items: - type: "string" - Dir: - description: "The working directory for commands to run in." - type: "string" - User: - description: "The user inside the container." - type: "string" - Groups: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - Privileges: - type: "object" - description: "Security options for the container" - properties: - CredentialSpec: - type: "object" - description: "CredentialSpec for managed service account (Windows only)" - properties: - Config: - type: "string" - example: "0bt9dmxjvjiqermk6xrop3ekq" - description: | - Load credential spec from a Swarm Config with the given ID. - The specified config must also be present in the Configs - field with the Runtime property set. - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - File: - type: "string" - example: "spec.json" - description: | - Load credential spec from this file. The file is read by - the daemon, and must be present in the `CredentialSpecs` - subdirectory in the docker data directory, which defaults - to `C:\ProgramData\Docker\` on Windows. - - For example, specifying `spec.json` loads - `C:\ProgramData\Docker\CredentialSpecs\spec.json`. - -
- - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - Registry: - type: "string" - description: | - Load credential spec from this value in the Windows - registry. The specified registry value must be located in: - - `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - SELinuxContext: - type: "object" - description: "SELinux labels of the container" - properties: - Disable: - type: "boolean" - description: "Disable SELinux" - User: - type: "string" - description: "SELinux user label" - Role: - type: "string" - description: "SELinux role label" - Type: - type: "string" - description: "SELinux type label" - Level: - type: "string" - description: "SELinux level label" - Seccomp: - type: "object" - description: "Options for configuring seccomp on the container" - properties: - Mode: - type: "string" - enum: - - "default" - - "unconfined" - - "custom" - Profile: - description: "The custom seccomp profile as a json object" - type: "string" - AppArmor: - type: "object" - description: "Options for configuring AppArmor on the container" - properties: - Mode: - type: "string" - enum: - - "default" - - "disabled" - NoNewPrivileges: - type: "boolean" - description: "Configuration of the no_new_privs bit in the container" - - TTY: - description: "Whether a pseudo-TTY should be allocated." - type: "boolean" - OpenStdin: - description: "Open `stdin`" - type: "boolean" - ReadOnly: - description: "Mount the container's root filesystem as read only." - type: "boolean" - Mounts: - description: | - Specification for mounts to be added to containers created as part - of the service. - type: "array" - items: - $ref: "#/definitions/Mount" - StopSignal: - description: "Signal to stop the container." - type: "string" - StopGracePeriod: - description: | - Amount of time to wait for the container to terminate before - forcefully killing it. - type: "integer" - format: "int64" - HealthCheck: - $ref: "#/definitions/HealthConfig" - Hosts: - type: "array" - description: | - A list of hostname/IP mappings to add to the container's `hosts` - file. The format of extra hosts is specified in the - [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) - man page: - - IP_address canonical_hostname [aliases...] - items: - type: "string" - DNSConfig: - description: | - Specification for DNS related configurations in resolver configuration - file (`resolv.conf`). - type: "object" - properties: - Nameservers: - description: "The IP addresses of the name servers." - type: "array" - items: - type: "string" - Search: - description: "A search list for host-name lookup." - type: "array" - items: - type: "string" - Options: - description: | - A list of internal resolver variables to be modified (e.g., - `debug`, `ndots:3`, etc.). - type: "array" - items: - type: "string" - Secrets: - description: | - Secrets contains references to zero or more secrets that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - SecretID: - description: | - SecretID represents the ID of the specific secret that we're - referencing. - type: "string" - SecretName: - description: | - SecretName is the name of the secret that this references, - but this is just provided for lookup/display purposes. 
The - secret in the reference will be identified by its ID. - type: "string" - OomScoreAdj: - type: "integer" - format: "int64" - description: | - An integer value containing the score given to the container in - order to tune OOM killer preferences. - example: 0 - Configs: - description: | - Configs contains references to zero or more configs that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - -
- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - Runtime: - description: | - Runtime represents a target that is not mounted into the - container but is used by the task - -
- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually - > exclusive - type: "object" - ConfigID: - description: | - ConfigID represents the ID of the specific config that we're - referencing. - type: "string" - ConfigName: - description: | - ConfigName is the name of the config that this references, - but this is just provided for lookup/display purposes. The - config in the reference will be identified by its ID. - type: "string" - Isolation: - type: "string" - description: | - Isolation technology of the containers running the service. - (Windows only) - enum: - - "default" - - "process" - - "hyperv" - - "" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - Sysctls: - description: | - Set kernel namedspaced parameters (sysctls) in the container. - The Sysctls option on services accepts the same sysctls as the - are supported on containers. Note that while the same sysctls are - supported, no guarantees or checks are made about their - suitability for a clustered environment, and it's up to the user - to determine whether a given sysctl will work properly in a - Service. - type: "object" - additionalProperties: - type: "string" - # This option is not used by Windows containers - CapabilityAdd: - type: "array" - description: | - A list of kernel capabilities to add to the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - - "CAP_SYS_ADMIN" - - "CAP_SYS_CHROOT" - - "CAP_SYSLOG" - CapabilityDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - Ulimits: - description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - NetworkAttachmentSpec: - description: | - Read-only spec type for non-swarm containers attached to swarm overlay - networks. - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - type: "object" - properties: - ContainerID: - description: "ID of the container represented by this task" - type: "string" - Resources: - description: | - Resource requirements which apply to each individual container created - as part of the service. - type: "object" - properties: - Limits: - description: "Define resources limits." - $ref: "#/definitions/Limit" - Reservations: - description: "Define resources reservation." - $ref: "#/definitions/ResourceObject" - RestartPolicy: - description: | - Specification for the restart policy which applies to containers - created as part of this service. - type: "object" - properties: - Condition: - description: "Condition for restart." - type: "string" - enum: - - "none" - - "on-failure" - - "any" - Delay: - description: "Delay between restart attempts." - type: "integer" - format: "int64" - MaxAttempts: - description: | - Maximum attempts to restart a given container before giving up - (default value is 0, which is ignored). - type: "integer" - format: "int64" - default: 0 - Window: - description: | - Windows is the time window used to evaluate the restart policy - (default value is 0, which is unbounded). - type: "integer" - format: "int64" - default: 0 - Placement: - type: "object" - properties: - Constraints: - description: | - An array of constraint expressions to limit the set of nodes where - a task can be scheduled. Constraint expressions can either use a - _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find - nodes that satisfy every expression (AND match). Constraints can - match node or Docker Engine labels as follows: - - node attribute | matches | example - ---------------------|--------------------------------|----------------------------------------------- - `node.id` | Node ID | `node.id==2ivku8v2gvtg4` - `node.hostname` | Node hostname | `node.hostname!=node-2` - `node.role` | Node role (`manager`/`worker`) | `node.role==manager` - `node.platform.os` | Node operating system | `node.platform.os==windows` - `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` - `node.labels` | User-defined node labels | `node.labels.security==high` - `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` - - `engine.labels` apply to Docker Engine labels like operating system, - drivers, etc. Swarm administrators add `node.labels` for operational - purposes by using the [`node update endpoint`](#operation/NodeUpdate). - - type: "array" - items: - type: "string" - example: - - "node.hostname!=node3.corp.example.com" - - "node.role!=manager" - - "node.labels.type==production" - - "node.platform.os==linux" - - "node.platform.arch==x86_64" - Preferences: - description: | - Preferences provide a way to make the scheduler aware of factors - such as topology. They are provided in order from highest to - lowest precedence. - type: "array" - items: - type: "object" - properties: - Spread: - type: "object" - properties: - SpreadDescriptor: - description: | - label descriptor, such as `engine.labels.az`. 
- type: "string" - example: - - Spread: - SpreadDescriptor: "node.labels.datacenter" - - Spread: - SpreadDescriptor: "node.labels.rack" - MaxReplicas: - description: | - Maximum number of replicas for per node (default value is 0, which - is unlimited) - type: "integer" - format: "int64" - default: 0 - Platforms: - description: | - Platforms stores all the platforms that the service's image can - run on. This field is used in the platform filter for scheduling. - If empty, then the platform filter is off, meaning there are no - scheduling restrictions. - type: "array" - items: - $ref: "#/definitions/Platform" - ForceUpdate: - description: | - A counter that triggers an update even if no relevant parameters have - been changed. - type: "integer" - Runtime: - description: | - Runtime is the type of runtime specified for the task executor. - type: "string" - Networks: - description: "Specifies which networks the service should attach to." - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - LogDriver: - description: | - Specifies the log driver to use for tasks created from this spec. If - not present, the default one for the swarm will be used, finally - falling back to the engine default if not specified. - type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - - TaskState: - type: "string" - enum: - - "new" - - "allocated" - - "pending" - - "assigned" - - "accepted" - - "preparing" - - "ready" - - "starting" - - "running" - - "complete" - - "shutdown" - - "failed" - - "rejected" - - "remove" - - "orphaned" - - ContainerStatus: - type: "object" - description: "represents the status of a container." - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" - - PortStatus: - type: "object" - description: "represents the port status of a task's host ports whose service has published host ports" - properties: - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - TaskStatus: - type: "object" - description: "represents the status of a task." - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - $ref: "#/definitions/ContainerStatus" - PortStatus: - $ref: "#/definitions/PortStatus" - - Task: - type: "object" - properties: - ID: - description: "The ID of the task." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Name: - description: "Name of the task." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Spec: - $ref: "#/definitions/TaskSpec" - ServiceID: - description: "The ID of the service this task is part of." - type: "string" - Slot: - type: "integer" - NodeID: - description: "The ID of the node that this task is on." - type: "string" - AssignedGenericResources: - $ref: "#/definitions/GenericResources" - Status: - $ref: "#/definitions/TaskStatus" - DesiredState: - $ref: "#/definitions/TaskState" - JobIteration: - description: | - If the Service this Task belongs to is a job-mode service, contains - the JobIteration of the Service this Task was created for. Absent if - the Task was created for a Replicated or Global Service. 
- $ref: "#/definitions/ObjectVersion" - example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - AssignedGenericResources: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - ServiceSpec: - description: "User modifiable configuration for a service." - type: object - properties: - Name: - description: "Name of the service." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - TaskTemplate: - $ref: "#/definitions/TaskSpec" - Mode: - description: "Scheduling mode for the service." - type: "object" - properties: - Replicated: - type: "object" - properties: - Replicas: - type: "integer" - format: "int64" - Global: - type: "object" - ReplicatedJob: - description: | - The mode used for services with a finite number of tasks that run - to a completed state. - type: "object" - properties: - MaxConcurrent: - description: | - The maximum number of replicas to run simultaneously. - type: "integer" - format: "int64" - default: 1 - TotalCompletions: - description: | - The total number of replicas desired to reach the Completed - state. If unset, will default to the value of `MaxConcurrent` - type: "integer" - format: "int64" - GlobalJob: - description: | - The mode used for services which run a task to the completed state - on each valid node. - type: "object" - UpdateConfig: - description: "Specification for the update strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be updated in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: "Amount of time between updates, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an updated task fails to run, or stops running - during the update. - type: "string" - enum: - - "continue" - - "pause" - - "rollback" - Monitor: - description: | - Amount of time to monitor each updated task for failures, in - nanoseconds. 
- type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during an update before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling out an updated task. Either - the old task is shut down before the new task is started, or the - new task is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - RollbackConfig: - description: "Specification for the rollback strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be rolled back in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: | - Amount of time between rollback iterations, in nanoseconds. - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an rolled back task fails to run, or stops - running during the rollback. - type: "string" - enum: - - "continue" - - "pause" - Monitor: - description: | - Amount of time to monitor each rolled back task for failures, in - nanoseconds. - type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during a rollback before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling back a task. Either the old - task is shut down before the new task is started, or the new task - is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - Networks: - description: | - Specifies which networks the service should attach to. - - Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - - EndpointSpec: - $ref: "#/definitions/EndpointSpec" - - EndpointPortConfig: - type: "object" - properties: - Name: - type: "string" - Protocol: - type: "string" - enum: - - "tcp" - - "udp" - - "sctp" - TargetPort: - description: "The port inside the container." - type: "integer" - PublishedPort: - description: "The port on the swarm hosts." - type: "integer" - PublishMode: - description: | - The mode in which port is published. - -


- - - "ingress" makes the target port accessible on every node, - regardless of whether there is a task for the service running on - that node or not. - - "host" bypasses the routing mesh and publish the port directly on - the swarm node where that service is running. - - type: "string" - enum: - - "ingress" - - "host" - default: "ingress" - example: "ingress" - - EndpointSpec: - description: "Properties that can be configured to access and load balance a service." - type: "object" - properties: - Mode: - description: | - The mode of resolution to use for internal load balancing between tasks. - type: "string" - enum: - - "vip" - - "dnsrr" - default: "vip" - Ports: - description: | - List of exposed ports that this service is accessible on from the - outside. Ports can only be provided if `vip` resolution mode is used. - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - Service: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" - Endpoint: - type: "object" - properties: - Spec: - $ref: "#/definitions/EndpointSpec" - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - VirtualIPs: - type: "array" - items: - type: "object" - properties: - NetworkID: - type: "string" - Addr: - type: "string" - UpdateStatus: - description: "The status of a service update." - type: "object" - properties: - State: - type: "string" - enum: - - "updating" - - "paused" - - "completed" - StartedAt: - type: "string" - format: "dateTime" - CompletedAt: - type: "string" - format: "dateTime" - Message: - type: "string" - ServiceStatus: - description: | - The status of the service's tasks. Provided only when requested as - part of a ServiceList operation. - type: "object" - properties: - RunningTasks: - description: | - The number of tasks for the service currently in the Running state. - type: "integer" - format: "uint64" - example: 7 - DesiredTasks: - description: | - The number of tasks for the service desired to be running. - For replicated services, this is the replica count from the - service spec. For global services, this is computed by taking - count of all tasks for the service with a Desired State other - than Shutdown. - type: "integer" - format: "uint64" - example: 10 - CompletedTasks: - description: | - The number of tasks for a job that are in the Completed state. - This field must be cross-referenced with the service type, as the - value of 0 may mean the service is not in a job mode, or it may - mean the job-mode service has no tasks yet Completed. - type: "integer" - format: "uint64" - JobStatus: - description: | - The status of the service when it is in one of ReplicatedJob or - GlobalJob modes. Absent on Replicated and Global mode services. The - JobIteration is an ObjectVersion, but unlike the Service's version, - does not need to be sent with an update request. - type: "object" - properties: - JobIteration: - description: | - JobIteration is a value increased each time a Job is executed, - successfully or otherwise. "Executed", in this case, means the - job as a whole has been started, not that an individual Task has - been launched. A job is "Executed" when its ServiceSpec is - updated. JobIteration can be used to disambiguate Tasks belonging - to different executions of a job. 
Though JobIteration will - increase with each subsequent execution, it may not necessarily - increase by 1, and so JobIteration should not be used to - $ref: "#/definitions/ObjectVersion" - LastExecution: - description: | - The last time, as observed by the server, that this job was - started. - type: "string" - format: "dateTime" - example: - ID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Version: - Index: 19 - CreatedAt: "2016-06-07T21:05:51.880065305Z" - UpdatedAt: "2016-06-07T21:07:29.962229872Z" - Spec: - Name: "hopeful_cori" - TaskTemplate: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Endpoint: - Spec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - VirtualIPs: - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.2/16" - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.3/16" - - ImageDeleteResponseItem: - type: "object" - x-go-name: "DeleteResponse" - properties: - Untagged: - description: "The image ID of an image that was untagged" - type: "string" - Deleted: - description: "The image ID of an image that was deleted" - type: "string" - - ServiceCreateResponse: - type: "object" - description: | - contains the information returned to a client on the - creation of a new service. - properties: - ID: - description: "The ID of the created service." - type: "string" - x-nullable: false - example: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warnings: - description: | - Optional warning message. - - FIXME(thaJeztah): this should have "omitempty" in the generated type. - type: "array" - x-nullable: true - items: - type: "string" - example: - - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ServiceUpdateResponse: - type: "object" - properties: - Warnings: - description: "Optional warning messages" - type: "array" - items: - type: "string" - example: - Warnings: - - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ContainerInspectResponse: - type: "object" - title: "ContainerInspectResponse" - x-go-name: "InspectResponse" - properties: - Id: - description: |- - The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). - type: "string" - x-go-name: "ID" - minLength: 64 - maxLength: 64 - pattern: "^[0-9a-fA-F]{64}$" - example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" - Created: - description: |- - Date and time at which the container was created, formatted in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
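The `hopeful_cori` example above maps directly onto the JSON body accepted by `POST /services/create`. A minimal sketch of building that body in Go follows; the service name, image, and port numbers are taken from the example, and only a small subset of ServiceSpec is populated.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Minimal ServiceSpec, mirroring the example above: one replica of the
	// "redis" image with TCP 6379 published on 30001 through the routing mesh.
	spec := map[string]any{
		"Name": "hopeful_cori",
		"TaskTemplate": map[string]any{
			"ContainerSpec": map[string]any{"Image": "redis"},
		},
		"Mode": map[string]any{
			"Replicated": map[string]any{"Replicas": 1},
		},
		"EndpointSpec": map[string]any{
			"Mode": "vip",
			"Ports": []map[string]any{
				{"Protocol": "tcp", "TargetPort": 6379, "PublishedPort": 30001, "PublishMode": "ingress"},
			},
		},
	}
	body, err := json.MarshalIndent(spec, "", "  ")
	if err != nil {
		panic(err)
	}
	// POST this body to /services/create; the response is a
	// ServiceCreateResponse with the new service ID and optional warnings.
	fmt.Println(string(body))
}
```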
- type: "string" - format: "dateTime" - x-nullable: true - example: "2025-02-17T17:43:39.64001363Z" - Path: - description: |- - The path to the command being run - type: "string" - example: "/bin/sh" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - example: - - "-c" - - "exit 9" - State: - $ref: "#/definitions/ContainerState" - Image: - description: |- - The ID (digest) of the image that this container was created from. - type: "string" - example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" - ResolvConfPath: - description: |- - Location of the `/etc/resolv.conf` generated for the container on the - host. - - This file is managed through the docker daemon, and should not be - accessed or modified by other tools. - type: "string" - example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" - HostnamePath: - description: |- - Location of the `/etc/hostname` generated for the container on the - host. - - This file is managed through the docker daemon, and should not be - accessed or modified by other tools. - type: "string" - example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" - HostsPath: - description: |- - Location of the `/etc/hosts` generated for the container on the - host. - - This file is managed through the docker daemon, and should not be - accessed or modified by other tools. - type: "string" - example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" - LogPath: - description: |- - Location of the file used to buffer the container's logs. Depending on - the logging-driver used for the container, this field may be omitted. - - This file is managed through the docker daemon, and should not be - accessed or modified by other tools. - type: "string" - x-nullable: true - example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" - Name: - description: |- - The name associated with this container. - - For historic reasons, the name may be prefixed with a forward-slash (`/`). - type: "string" - example: "/funny_chatelet" - RestartCount: - description: |- - Number of times the container was restarted since it was created, - or since daemon was started. - type: "integer" - example: 0 - Driver: - description: |- - The storage-driver used for the container's filesystem (graph-driver - or snapshotter). - type: "string" - example: "overlayfs" - Platform: - description: |- - The platform (operating system) for which the container was created. - - This field was introduced for the experimental "LCOW" (Linux Containers - On Windows) features, which has been removed. In most cases, this field - is equal to the host's operating system (`linux` or `windows`). - type: "string" - example: "linux" - ImageManifestDescriptor: - $ref: "#/definitions/OCIDescriptor" - description: |- - OCI descriptor of the platform-specific manifest of the image - the container was created from. - - Note: Only available if the daemon provides a multi-platform - image store. - MountLabel: - description: |- - SELinux mount label set for the container. - type: "string" - example: "" - ProcessLabel: - description: |- - SELinux process label set for the container. 
- type: "string" - example: "" - AppArmorProfile: - description: |- - The AppArmor profile set for the container. - type: "string" - example: "" - ExecIDs: - description: |- - IDs of exec instances that are running in the container. - type: "array" - items: - type: "string" - x-nullable: true - example: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/DriverData" - SizeRw: - description: |- - The size of files that have been created or changed by this container. - - This field is omitted by default, and only set when size is requested - in the API request. - type: "integer" - format: "int64" - x-nullable: true - example: "122880" - SizeRootFs: - description: |- - The total size of all files in the read-only layers from the image - that the container uses. These layers can be shared between containers. - - This field is omitted by default, and only set when size is requested - in the API request. - type: "integer" - format: "int64" - x-nullable: true - example: "1653948416" - Mounts: - description: |- - List of mounts used by the container. - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - - ContainerSummary: - type: "object" - properties: - Id: - description: |- - The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). - type: "string" - x-go-name: "ID" - minLength: 64 - maxLength: 64 - pattern: "^[0-9a-fA-F]{64}$" - example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" - Names: - description: |- - The names associated with this container. Most containers have a single - name, but when using legacy "links", the container can have multiple - names. - - For historic reasons, names are prefixed with a forward-slash (`/`). - type: "array" - items: - type: "string" - example: - - "/funny_chatelet" - Image: - description: |- - The name or ID of the image used to create the container. - - This field shows the image reference as was specified when creating the container, - which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` - or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), - short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). - - The content of this field can be updated at runtime if the image used to - create the container is untagged, in which case the field is updated to - contain the the image ID (digest) it was resolved to in its canonical, - non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). - type: "string" - example: "docker.io/library/ubuntu:latest" - ImageID: - description: |- - The ID (digest) of the image that this container was created from. - type: "string" - example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" - ImageManifestDescriptor: - $ref: "#/definitions/OCIDescriptor" - x-nullable: true - description: | - OCI descriptor of the platform-specific manifest of the image - the container was created from. - - Note: Only available if the daemon provides a multi-platform - image store. - - This field is not populated in the `GET /system/df` endpoint. 
- Command: - description: "Command to run when starting the container" - type: "string" - example: "/bin/bash" - Created: - description: |- - Date and time at which the container was created as a Unix timestamp - (number of seconds since EPOCH). - type: "integer" - format: "int64" - example: "1739811096" - Ports: - description: |- - Port-mappings for the container. - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: |- - The size of files that have been created or changed by this container. - - This field is omitted by default, and only set when size is requested - in the API request. - type: "integer" - format: "int64" - x-nullable: true - example: "122880" - SizeRootFs: - description: |- - The total size of all files in the read-only layers from the image - that the container uses. These layers can be shared between containers. - - This field is omitted by default, and only set when size is requested - in the API request. - type: "integer" - format: "int64" - x-nullable: true - example: "1653948416" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - State: - description: | - The state of this container. - type: "string" - enum: - - "created" - - "running" - - "paused" - - "restarting" - - "exited" - - "removing" - - "dead" - example: "running" - Status: - description: |- - Additional human-readable status of this container (e.g. `Exit 0`) - type: "string" - example: "Up 4 days" - HostConfig: - type: "object" - description: |- - Summary of host-specific runtime information of the container. This - is a reduced set of information in the container's "HostConfig" as - available in the container "inspect" response. - properties: - NetworkMode: - description: |- - Networking mode (`host`, `none`, `container:`) or name of the - primary network the container is using. - - This field is primarily for backward compatibility. The container - can be connected to multiple networks for which information can be - found in the `NetworkSettings.Networks` field, which enumerates - settings per network. - type: "string" - example: "mynetwork" - Annotations: - description: |- - Arbitrary key-value metadata attached to the container. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - io.kubernetes.docker.type: "container" - io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" - NetworkSettings: - description: |- - Summary of the container's network settings - type: "object" - properties: - Networks: - type: "object" - description: |- - Summary of network-settings for each network the container is - attached to. - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - description: |- - List of mounts used by the container. - items: - $ref: "#/definitions/MountPoint" - - Driver: - description: "Driver represents a driver (network, logging, secrets)." - type: "object" - required: [Name] - properties: - Name: - description: "Name of the driver." - type: "string" - x-nullable: false - example: "some-driver" - Options: - description: "Key/value map of driver-specific options." 
- type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - OptionA: "value for driver-specific option A" - OptionB: "value for driver-specific option B" - - SecretSpec: - type: "object" - properties: - Name: - description: "User-defined name of the secret." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Data: - description: | - Data is the data to store as a secret, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. - It must be empty if the Driver field is set, in which case the data is - loaded from an external secret store. The maximum allowed size is 500KB, - as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). - - This field is only used to _create_ a secret, and is not returned by - other endpoints. - type: "string" - example: "" - Driver: - description: | - Name of the secrets driver used to fetch the secret's value from an - external secret store. - $ref: "#/definitions/Driver" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Secret: - type: "object" - properties: - ID: - type: "string" - example: "blt1owaxmitz71s9v5zh81zun" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - Spec: - $ref: "#/definitions/SecretSpec" - - ConfigSpec: - type: "object" - properties: - Name: - description: "User-defined name of the config." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Data: - description: | - Data is the data to store as a config, formatted as a Base64-url-safe-encoded - ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. - The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). - type: "string" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Config: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ConfigSpec" - - ContainerState: - description: | - ContainerState stores container's running state. It's part of ContainerJSONBase - and will be returned by the "inspect" command. - type: "object" - x-nullable: true - properties: - Status: - description: | - String representation of the container state. Can be one of "created", - "running", "paused", "restarting", "removing", "exited", or "dead". 
- type: "string" - enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] - example: "running" - Running: - description: | - Whether this container is running. - - Note that a running container can be _paused_. The `Running` and `Paused` - booleans are not mutually exclusive: - - When pausing a container (on Linux), the freezer cgroup is used to suspend - all processes in the container. Freezing the process requires the process to - be running. As a result, paused containers are both `Running` _and_ `Paused`. - - Use the `Status` field instead to determine if a container's state is "running". - type: "boolean" - example: true - Paused: - description: "Whether this container is paused." - type: "boolean" - example: false - Restarting: - description: "Whether this container is restarting." - type: "boolean" - example: false - OOMKilled: - description: | - Whether a process within this container has been killed because it ran - out of memory since the container was last started. - type: "boolean" - example: false - Dead: - type: "boolean" - example: false - Pid: - description: "The process ID of this container" - type: "integer" - example: 1234 - ExitCode: - description: "The last exit code of this container" - type: "integer" - example: 0 - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - example: "2020-01-06T09:06:59.461876391Z" - FinishedAt: - description: "The time when this container last exited." - type: "string" - example: "2020-01-06T09:07:59.461876391Z" - Health: - $ref: "#/definitions/Health" - - ContainerCreateResponse: - description: "OK response to ContainerCreate operation" - type: "object" - title: "ContainerCreateResponse" - x-go-name: "CreateResponse" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - example: [] - - ContainerUpdateResponse: - type: "object" - title: "ContainerUpdateResponse" - x-go-name: "UpdateResponse" - description: |- - Response for a successful container-update. - properties: - Warnings: - type: "array" - description: |- - Warnings encountered when updating the container. - items: - type: "string" - example: ["Published ports are discarded when using host network mode"] - - ContainerStatsResponse: - description: | - Statistics sample for a container. - type: "object" - x-go-name: "StatsResponse" - title: "ContainerStatsResponse" - properties: - name: - description: "Name of the container" - type: "string" - x-nullable: true - example: "boring_wozniak" - id: - description: "ID of the container" - type: "string" - x-nullable: true - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - read: - description: | - Date and time at which this sample was collected. - The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - with nano-seconds. - type: "string" - format: "date-time" - example: "2025-01-16T13:55:22.165243637Z" - preread: - description: | - Date and time at which this first sample was collected. This field - is not propagated if the "one-shot" option is set. If the "one-shot" - option is set, this field may be omitted, empty, or set to a default - date (`0001-01-01T00:00:00Z`). 
- - The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - with nano-seconds. - type: "string" - format: "date-time" - example: "2025-01-16T13:55:21.160452595Z" - pids_stats: - $ref: "#/definitions/ContainerPidsStats" - blkio_stats: - $ref: "#/definitions/ContainerBlkioStats" - num_procs: - description: | - The number of processors on the system. - - This field is Windows-specific and always zero for Linux containers. - type: "integer" - format: "uint32" - example: 16 - storage_stats: - $ref: "#/definitions/ContainerStorageStats" - cpu_stats: - $ref: "#/definitions/ContainerCPUStats" - precpu_stats: - $ref: "#/definitions/ContainerCPUStats" - memory_stats: - $ref: "#/definitions/ContainerMemoryStats" - networks: - description: | - Network statistics for the container per interface. - - This field is omitted if the container has no networking enabled. - x-nullable: true - additionalProperties: - $ref: "#/definitions/ContainerNetworkStats" - example: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - - ContainerBlkioStats: - description: | - BlkioStats stores all IO service stats for data read and write. - - This type is Linux-specific and holds many fields that are specific to cgroups v1. - On a cgroup v2 host, all fields other than `io_service_bytes_recursive` - are omitted or `null`. - - This type is only populated on Linux and omitted for Windows containers. - type: "object" - x-go-name: "BlkioStats" - x-nullable: true - properties: - io_service_bytes_recursive: - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - io_serviced_recursive: - description: | - This field is only available when using Linux containers with - cgroups v1. It is omitted or `null` when using cgroups v2. - x-nullable: true - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - io_queue_recursive: - description: | - This field is only available when using Linux containers with - cgroups v1. It is omitted or `null` when using cgroups v2. - x-nullable: true - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - io_service_time_recursive: - description: | - This field is only available when using Linux containers with - cgroups v1. It is omitted or `null` when using cgroups v2. - x-nullable: true - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - io_wait_time_recursive: - description: | - This field is only available when using Linux containers with - cgroups v1. It is omitted or `null` when using cgroups v2. - x-nullable: true - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - io_merged_recursive: - description: | - This field is only available when using Linux containers with - cgroups v1. It is omitted or `null` when using cgroups v2. - x-nullable: true - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - io_time_recursive: - description: | - This field is only available when using Linux containers with - cgroups v1. It is omitted or `null` when using cgroups v2. - x-nullable: true - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - sectors_recursive: - description: | - This field is only available when using Linux containers with - cgroups v1. It is omitted or `null` when using cgroups v2. 
- x-nullable: true - type: "array" - items: - $ref: "#/definitions/ContainerBlkioStatEntry" - example: - io_service_bytes_recursive: [ - {"major": 254, "minor": 0, "op": "read", "value": 7593984}, - {"major": 254, "minor": 0, "op": "write", "value": 100} - ] - io_serviced_recursive: null - io_queue_recursive: null - io_service_time_recursive: null - io_wait_time_recursive: null - io_merged_recursive: null - io_time_recursive: null - sectors_recursive: null - - ContainerBlkioStatEntry: - description: | - Blkio stats entry. - - This type is Linux-specific and omitted for Windows containers. - type: "object" - x-go-name: "BlkioStatEntry" - x-nullable: true - properties: - major: - type: "integer" - format: "uint64" - example: 254 - minor: - type: "integer" - format: "uint64" - example: 0 - op: - type: "string" - example: "read" - value: - type: "integer" - format: "uint64" - example: 7593984 - - ContainerCPUStats: - description: | - CPU related info of the container - type: "object" - x-go-name: "CPUStats" - x-nullable: true - properties: - cpu_usage: - $ref: "#/definitions/ContainerCPUUsage" - system_cpu_usage: - description: | - System Usage. - - This field is Linux-specific and omitted for Windows containers. - type: "integer" - format: "uint64" - x-nullable: true - example: 5 - online_cpus: - description: | - Number of online CPUs. - - This field is Linux-specific and omitted for Windows containers. - type: "integer" - format: "uint32" - x-nullable: true - example: 5 - throttling_data: - $ref: "#/definitions/ContainerThrottlingData" - - ContainerCPUUsage: - description: | - All CPU stats aggregated since container inception. - type: "object" - x-go-name: "CPUUsage" - x-nullable: true - properties: - total_usage: - description: | - Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). - type: "integer" - format: "uint64" - example: 29912000 - percpu_usage: - description: | - Total CPU time (in nanoseconds) consumed per core (Linux). - - This field is Linux-specific when using cgroups v1. It is omitted - when using cgroups v2 and Windows containers. - type: "array" - x-nullable: true - items: - type: "integer" - format: "uint64" - example: 29912000 - - usage_in_kernelmode: - description: | - Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), - or time spent (in 100's of nanoseconds) by all container processes in - kernel mode (Windows). - - Not populated for Windows containers using Hyper-V isolation. - type: "integer" - format: "uint64" - example: 21994000 - usage_in_usermode: - description: | - Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), - or time spent (in 100's of nanoseconds) by all container processes in - kernel mode (Windows). - - Not populated for Windows containers using Hyper-V isolation. - type: "integer" - format: "uint64" - example: 7918000 - - ContainerPidsStats: - description: | - PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). - - This type is Linux-specific and omitted for Windows containers. - type: "object" - x-go-name: "PidsStats" - x-nullable: true - properties: - current: - description: | - Current is the number of PIDs in the cgroup. - type: "integer" - format: "uint64" - x-nullable: true - example: 5 - limit: - description: | - Limit is the hard limit on the number of pids in the cgroup. - A "Limit" of 0 means that there is no limit. 
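The `cpu_stats`/`precpu_stats` pair above is what stats consumers use to derive a CPU percentage. The sketch below applies the conventional delta calculation (container CPU delta over system delta, scaled by `online_cpus`); the sample numbers are placeholders, and on cgroups v1 hosts the length of `percpu_usage` is sometimes used in place of `online_cpus`.

```go
package main

import "fmt"

// cpuStats mirrors the subset of ContainerCPUStats used below.
type cpuStats struct {
	CPUUsage struct {
		TotalUsage uint64 `json:"total_usage"`
	} `json:"cpu_usage"`
	SystemCPUUsage uint64 `json:"system_cpu_usage"`
	OnlineCPUs     uint32 `json:"online_cpus"`
}

// cpuPercent computes the percentage from two consecutive samples
// (precpu_stats and cpu_stats of one stats response).
func cpuPercent(pre, cur cpuStats) float64 {
	cpuDelta := float64(cur.CPUUsage.TotalUsage) - float64(pre.CPUUsage.TotalUsage)
	sysDelta := float64(cur.SystemCPUUsage) - float64(pre.SystemCPUUsage)
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * float64(cur.OnlineCPUs) * 100.0
}

func main() {
	// Placeholder samples, not real measurements.
	var pre, cur cpuStats
	pre.CPUUsage.TotalUsage = 100_000_000
	pre.SystemCPUUsage = 2_000_000_000
	cur.CPUUsage.TotalUsage = 160_000_000
	cur.SystemCPUUsage = 2_400_000_000
	cur.OnlineCPUs = 5

	fmt.Printf("%.2f%%\n", cpuPercent(pre, cur)) // 75.00%
}
```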
- type: "integer" - format: "uint64" - x-nullable: true - example: 18446744073709551615 - - ContainerThrottlingData: - description: | - CPU throttling stats of the container. - - This type is Linux-specific and omitted for Windows containers. - type: "object" - x-go-name: "ThrottlingData" - x-nullable: true - properties: - periods: - description: | - Number of periods with throttling active. - type: "integer" - format: "uint64" - example: 0 - throttled_periods: - description: | - Number of periods when the container hit its throttling limit. - type: "integer" - format: "uint64" - example: 0 - throttled_time: - description: | - Aggregated time (in nanoseconds) the container was throttled for. - type: "integer" - format: "uint64" - example: 0 - - ContainerMemoryStats: - description: | - Aggregates all memory stats since container inception on Linux. - Windows returns stats for commit and private working set only. - type: "object" - x-go-name: "MemoryStats" - properties: - usage: - description: | - Current `res_counter` usage for memory. - - This field is Linux-specific and omitted for Windows containers. - type: "integer" - format: "uint64" - x-nullable: true - example: 0 - max_usage: - description: | - Maximum usage ever recorded. - - This field is Linux-specific and only supported on cgroups v1. - It is omitted when using cgroups v2 and for Windows containers. - type: "integer" - format: "uint64" - x-nullable: true - example: 0 - stats: - description: | - All the stats exported via memory.stat. when using cgroups v2. - - This field is Linux-specific and omitted for Windows containers. - type: "object" - additionalProperties: - type: "integer" - format: "uint64" - x-nullable: true - example: - { - "active_anon": 1572864, - "active_file": 5115904, - "anon": 1572864, - "anon_thp": 0, - "file": 7626752, - "file_dirty": 0, - "file_mapped": 2723840, - "file_writeback": 0, - "inactive_anon": 0, - "inactive_file": 2510848, - "kernel_stack": 16384, - "pgactivate": 0, - "pgdeactivate": 0, - "pgfault": 2042, - "pglazyfree": 0, - "pglazyfreed": 0, - "pgmajfault": 45, - "pgrefill": 0, - "pgscan": 0, - "pgsteal": 0, - "shmem": 0, - "slab": 1180928, - "slab_reclaimable": 725576, - "slab_unreclaimable": 455352, - "sock": 0, - "thp_collapse_alloc": 0, - "thp_fault_alloc": 1, - "unevictable": 0, - "workingset_activate": 0, - "workingset_nodereclaim": 0, - "workingset_refault": 0 - } - failcnt: - description: | - Number of times memory usage hits limits. - - This field is Linux-specific and only supported on cgroups v1. - It is omitted when using cgroups v2 and for Windows containers. - type: "integer" - format: "uint64" - x-nullable: true - example: 0 - limit: - description: | - This field is Linux-specific and omitted for Windows containers. - type: "integer" - format: "uint64" - x-nullable: true - example: 8217579520 - commitbytes: - description: | - Committed bytes. - - This field is Windows-specific and omitted for Linux containers. - type: "integer" - format: "uint64" - x-nullable: true - example: 0 - commitpeakbytes: - description: | - Peak committed bytes. - - This field is Windows-specific and omitted for Linux containers. - type: "integer" - format: "uint64" - x-nullable: true - example: 0 - privateworkingset: - description: | - Private working set. - - This field is Windows-specific and omitted for Linux containers. 
- type: "integer" - format: "uint64" - x-nullable: true - example: 0 - - ContainerNetworkStats: - description: | - Aggregates the network stats of one container - type: "object" - x-go-name: "NetworkStats" - x-nullable: true - properties: - rx_bytes: - description: | - Bytes received. Windows and Linux. - type: "integer" - format: "uint64" - example: 5338 - rx_packets: - description: | - Packets received. Windows and Linux. - type: "integer" - format: "uint64" - example: 36 - rx_errors: - description: | - Received errors. Not used on Windows. - - This field is Linux-specific and always zero for Windows containers. - type: "integer" - format: "uint64" - example: 0 - rx_dropped: - description: | - Incoming packets dropped. Windows and Linux. - type: "integer" - format: "uint64" - example: 0 - tx_bytes: - description: | - Bytes sent. Windows and Linux. - type: "integer" - format: "uint64" - example: 1200 - tx_packets: - description: | - Packets sent. Windows and Linux. - type: "integer" - format: "uint64" - example: 12 - tx_errors: - description: | - Sent errors. Not used on Windows. - - This field is Linux-specific and always zero for Windows containers. - type: "integer" - format: "uint64" - example: 0 - tx_dropped: - description: | - Outgoing packets dropped. Windows and Linux. - type: "integer" - format: "uint64" - example: 0 - endpoint_id: - description: | - Endpoint ID. Not used on Linux. - - This field is Windows-specific and omitted for Linux containers. - type: "string" - x-nullable: true - instance_id: - description: | - Instance ID. Not used on Linux. - - This field is Windows-specific and omitted for Linux containers. - type: "string" - x-nullable: true - - ContainerStorageStats: - description: | - StorageStats is the disk I/O stats for read/write on Windows. - - This type is Windows-specific and omitted for Linux containers. - type: "object" - x-go-name: "StorageStats" - x-nullable: true - properties: - read_count_normalized: - type: "integer" - format: "uint64" - x-nullable: true - example: 7593984 - read_size_bytes: - type: "integer" - format: "uint64" - x-nullable: true - example: 7593984 - write_count_normalized: - type: "integer" - format: "uint64" - x-nullable: true - example: 7593984 - write_size_bytes: - type: "integer" - format: "uint64" - x-nullable: true - example: 7593984 - - ContainerTopResponse: - type: "object" - x-go-name: "TopResponse" - title: "ContainerTopResponse" - description: |- - Container "top" response. - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - example: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - description: |- - Each process running in the container, where each process - is an array of values corresponding to the titles. 
- type: "array" - items: - type: "array" - items: - type: "string" - example: - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" - - ContainerWaitResponse: - description: "OK response to ContainerWait operation" - type: "object" - x-go-name: "WaitResponse" - title: "ContainerWaitResponse" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - format: "int64" - x-nullable: false - Error: - $ref: "#/definitions/ContainerWaitExitError" - - ContainerWaitExitError: - description: "container waiting error, if any" - type: "object" - x-go-name: "WaitExitError" - properties: - Message: - description: "Details of an error" - type: "string" - - SystemVersion: - type: "object" - description: | - Response of Engine API: GET "/version" - properties: - Platform: - type: "object" - required: [Name] - properties: - Name: - type: "string" - Components: - type: "array" - description: | - Information about system components - items: - type: "object" - x-go-name: ComponentVersion - required: [Name, Version] - properties: - Name: - description: | - Name of the component - type: "string" - example: "Engine" - Version: - description: | - Version of the component - type: "string" - x-nullable: false - example: "27.0.1" - Details: - description: | - Key/value pairs of strings with additional information about the - component. These values are intended for informational purposes - only, and their content is not defined, and not part of the API - specification. - - These messages can be printed by the client as information to the user. - type: "object" - x-nullable: true - Version: - description: "The version of the daemon" - type: "string" - example: "27.0.1" - ApiVersion: - description: | - The default (and highest) API version that is supported by the daemon - type: "string" - example: "1.47" - MinAPIVersion: - description: | - The minimum API version that is supported by the daemon - type: "string" - example: "1.24" - GitCommit: - description: | - The Git commit of the source code that was used to build the daemon - type: "string" - example: "48a66213fe" - GoVersion: - description: | - The version Go used to compile the daemon, and the version of the Go - runtime in use. - type: "string" - example: "go1.22.7" - Os: - description: | - The operating system that the daemon is running on ("linux" or "windows") - type: "string" - example: "linux" - Arch: - description: | - The architecture that the daemon is running on - type: "string" - example: "amd64" - KernelVersion: - description: | - The kernel version (`uname -r`) that the daemon is running on. - - This field is omitted when empty. - type: "string" - example: "6.8.0-31-generic" - Experimental: - description: | - Indicates if the daemon is started with experimental features enabled. - - This field is omitted when empty / false. - type: "boolean" - example: true - BuildTime: - description: | - The date and time that the daemon was compiled. - type: "string" - example: "2020-06-22T15:49:27.000000000+00:00" - - SystemInfo: - type: "object" - properties: - ID: - description: | - Unique identifier of the daemon. - -


- - > **Note**: The format of the ID itself is not part of the API, and - > should not be considered stable. - type: "string" - example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" - Containers: - description: "Total number of containers on the host." - type: "integer" - example: 14 - ContainersRunning: - description: | - Number of containers with status `"running"`. - type: "integer" - example: 3 - ContainersPaused: - description: | - Number of containers with status `"paused"`. - type: "integer" - example: 1 - ContainersStopped: - description: | - Number of containers with status `"stopped"`. - type: "integer" - example: 10 - Images: - description: | - Total number of images on the host. - - Both _tagged_ and _untagged_ (dangling) images are counted. - type: "integer" - example: 508 - Driver: - description: "Name of the storage driver in use." - type: "string" - example: "overlay2" - DriverStatus: - description: | - Information specific to the storage driver, provided as - "label" / "value" pairs. - - This information is provided by the storage driver, and formatted - in a way consistent with the output of `docker info` on the command - line. - -


- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["Backing Filesystem", "extfs"] - - ["Supports d_type", "true"] - - ["Native Overlay Diff", "true"] - DockerRootDir: - description: | - Root directory of persistent Docker state. - - Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` - on Windows. - type: "string" - example: "/var/lib/docker" - Plugins: - $ref: "#/definitions/PluginsInfo" - MemoryLimit: - description: "Indicates if the host has memory limit support enabled." - type: "boolean" - example: true - SwapLimit: - description: "Indicates if the host has memory swap limit support enabled." - type: "boolean" - example: true - KernelMemoryTCP: - description: | - Indicates if the host has kernel memory TCP limit support enabled. This - field is omitted if not supported. - - Kernel memory TCP limits are not supported when using cgroups v2, which - does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. - type: "boolean" - example: true - CpuCfsPeriod: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) period is supported by - the host. - type: "boolean" - example: true - CpuCfsQuota: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by - the host. - type: "boolean" - example: true - CPUShares: - description: | - Indicates if CPU Shares limiting is supported by the host. - type: "boolean" - example: true - CPUSet: - description: | - Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. - - See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) - type: "boolean" - example: true - PidsLimit: - description: "Indicates if the host kernel has PID limit support enabled." - type: "boolean" - example: true - OomKillDisable: - description: "Indicates if OOM killer disable is supported on the host." - type: "boolean" - IPv4Forwarding: - description: "Indicates IPv4 forwarding is enabled." - type: "boolean" - example: true - BridgeNfIptables: - description: | - Indicates if `bridge-nf-call-iptables` is available on the host when - the daemon was started. - -


- - > **Deprecated**: netfilter module is now loaded on-demand and no longer - > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in API v1.49. - type: "boolean" - example: false - BridgeNfIp6tables: - description: | - Indicates if `bridge-nf-call-ip6tables` is available on the host. - -


- - > **Deprecated**: netfilter module is now loaded on-demand, and no longer - > during daemon startup, making this field obsolete. This field is always - > `false` and will be removed in API v1.49. - type: "boolean" - example: false - Debug: - description: | - Indicates if the daemon is running in debug-mode / with debug-level - logging enabled. - type: "boolean" - example: true - NFd: - description: | - The total number of file descriptors in use by the daemon process. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 64 - NGoroutines: - description: | - The number of goroutines that currently exist. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 174 - SystemTime: - description: | - Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - format with nano-seconds. - type: "string" - example: "2017-08-08T20:28:29.06202363Z" - LoggingDriver: - description: | - The logging driver to use as a default for new containers. - type: "string" - CgroupDriver: - description: | - The driver to use for managing cgroups. - type: "string" - enum: ["cgroupfs", "systemd", "none"] - default: "cgroupfs" - example: "cgroupfs" - CgroupVersion: - description: | - The version of the cgroup. - type: "string" - enum: ["1", "2"] - default: "1" - example: "1" - NEventsListener: - description: "Number of event listeners subscribed." - type: "integer" - example: 30 - KernelVersion: - description: | - Kernel version of the host. - - On Linux, this information is obtained from `uname`. On Windows this - information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ - registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. - type: "string" - example: "6.8.0-31-generic" - OperatingSystem: - description: | - Name of the host's operating system, for example: "Ubuntu 24.04 LTS" - or "Windows Server 2016 Datacenter" - type: "string" - example: "Ubuntu 24.04 LTS" - OSVersion: - description: | - Version of the host's operating system - -


- - > **Note**: The information returned in this field, including its - > very existence, and the formatting of values, should not be considered - > stable, and may change without notice. - type: "string" - example: "24.04" - OSType: - description: | - Generic type of the operating system of the host, as returned by the - Go runtime (`GOOS`). - - Currently returned values are "linux" and "windows". A full list of - possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). - type: "string" - example: "linux" - Architecture: - description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). - - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). - type: "string" - example: "x86_64" - NCPU: - description: | - The number of logical CPUs usable by the daemon. - - The number of available CPUs is checked by querying the operating - system when the daemon starts. Changes to operating system CPU - allocation after the daemon is started are not reflected. - type: "integer" - example: 4 - MemTotal: - description: | - Total amount of physical memory available on the host, in bytes. - type: "integer" - format: "int64" - example: 2095882240 - - IndexServerAddress: - description: | - Address / URL of the index server that is used for image search, - and as a default for user authentication for Docker Hub and Docker Cloud. - default: "https://index.docker.io/v1/" - type: "string" - example: "https://index.docker.io/v1/" - RegistryConfig: - $ref: "#/definitions/RegistryServiceConfig" - GenericResources: - $ref: "#/definitions/GenericResources" - HttpProxy: - description: | - HTTP-proxy configured for the daemon. This value is obtained from the - [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" - HttpsProxy: - description: | - HTTPS-proxy configured for the daemon. This value is obtained from the - [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" - NoProxy: - description: | - Comma-separated list of domain extensions for which no proxy should be - used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) - environment variable. - - Containers do not automatically inherit this configuration. - type: "string" - example: "*.local, 169.254/16" - Name: - description: "Hostname of the host." - type: "string" - example: "node5.corp.example.com" - Labels: - description: | - User-defined labels (key/value metadata) as set on the daemon. - -
- - > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, - > set through the daemon configuration, and _node_ labels, set from a - > manager node in the Swarm. Node labels are not included in this - > field. Node labels can be retrieved using the `/nodes/(id)` endpoint - > on a manager node in the Swarm. - type: "array" - items: - type: "string" - example: ["storage=ssd", "production"] - ExperimentalBuild: - description: | - Indicates if experimental features are enabled on the daemon. - type: "boolean" - example: true - ServerVersion: - description: | - Version string of the daemon. - type: "string" - example: "27.0.1" - Runtimes: - description: | - List of [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtimes configured on the daemon. Keys hold the "name" used to - reference the runtime. - - The Docker daemon relies on an OCI compliant runtime (invoked via the - `containerd` daemon) as its interface to the Linux kernel namespaces, - cgroups, and SELinux. - - The default runtime is `runc`, and automatically configured. Additional - runtimes can be configured by the user and will be listed here. - type: "object" - additionalProperties: - $ref: "#/definitions/Runtime" - default: - runc: - path: "runc" - example: - runc: - path: "runc" - runc-master: - path: "/go/bin/runc" - custom: - path: "/usr/local/bin/my-oci-runtime" - runtimeArgs: ["--debug", "--systemd-cgroup=false"] - DefaultRuntime: - description: | - Name of the default OCI runtime that is used when starting containers. - - The default can be overridden per-container at create time. - type: "string" - default: "runc" - example: "runc" - Swarm: - $ref: "#/definitions/SwarmInfo" - LiveRestoreEnabled: - description: | - Indicates if live restore is enabled. - - If enabled, containers are kept running when the daemon is shutdown - or upon daemon start if running containers are detected. - type: "boolean" - default: false - example: false - Isolation: - description: | - Represents the isolation technology to use as a default for containers. - The supported values are platform-specific. - - If no isolation value is specified on daemon start, on Windows client, - the default is `hyperv`, and on Windows server, the default is `process`. - - This option is currently not used on other platforms. - default: "default" - type: "string" - enum: - - "default" - - "hyperv" - - "process" - - "" - InitBinary: - description: | - Name and, optional, path of the `docker-init` binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "docker-init" - ContainerdCommit: - $ref: "#/definitions/Commit" - RuncCommit: - $ref: "#/definitions/Commit" - InitCommit: - $ref: "#/definitions/Commit" - SecurityOptions: - description: | - List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, user-namespaces (userns), rootless and - no-new-privileges. - - Additional configuration options for each security feature may - be present, and are included as a comma-separated list of key/value - pairs. - type: "array" - items: - type: "string" - example: - - "name=apparmor" - - "name=seccomp,profile=default" - - "name=selinux" - - "name=userns" - - "name=rootless" - ProductLicense: - description: | - Reports a summary of the product license on the daemon. - - If a commercial license has been applied to the daemon, information - such as number of nodes, and expiration are included. 
- type: "string" - example: "Community Engine" - DefaultAddressPools: - description: | - List of custom default address pools for local networks, which can be - specified in the daemon.json file or dockerd option. - - Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 - 10.10.[0-255].0/24 address pools. - type: "array" - items: - type: "object" - properties: - Base: - description: "The network address in CIDR format" - type: "string" - example: "10.10.0.0/16" - Size: - description: "The network pool size" - type: "integer" - example: "24" - FirewallBackend: - $ref: "#/definitions/FirewallInfo" - DiscoveredDevices: - description: | - List of devices discovered by device drivers. - - Each device includes information about its source driver, kind, name, - and additional driver-specific attributes. - type: "array" - items: - $ref: "#/definitions/DeviceInfo" - Warnings: - description: | - List of warnings / informational messages about missing features, or - issues related to the daemon configuration. - - These messages can be printed by the client as information to the user. - type: "array" - items: - type: "string" - example: - - "WARNING: No memory limit support" - CDISpecDirs: - description: | - List of directories where (Container Device Interface) CDI - specifications are located. - - These specifications define vendor-specific modifications to an OCI - runtime specification for a container being created. - - An empty list indicates that CDI device injection is disabled. - - Note that since using CDI device injection requires the daemon to have - experimental enabled. For non-experimental daemons an empty list will - always be returned. - type: "array" - items: - type: "string" - example: - - "/etc/cdi" - - "/var/run/cdi" - Containerd: - $ref: "#/definitions/ContainerdInfo" - - ContainerdInfo: - description: | - Information for connecting to the containerd instance that is used by the daemon. - This is included for debugging purposes only. - type: "object" - x-nullable: true - properties: - Address: - description: "The address of the containerd socket." - type: "string" - example: "/run/containerd/containerd.sock" - Namespaces: - description: | - The namespaces that the daemon uses for running containers and - plugins in containerd. These namespaces can be configured in the - daemon configuration, and are considered to be used exclusively - by the daemon, Tampering with the containerd instance may cause - unexpected behavior. - - As these namespaces are considered to be exclusively accessed - by the daemon, it is not recommended to change these values, - or to change them to a value that is used by other systems, - such as cri-containerd. - type: "object" - properties: - Containers: - description: | - The default containerd namespace used for containers managed - by the daemon. - - The default namespace for containers is "moby", but will be - suffixed with the `.` of the remapped `root` if - user-namespaces are enabled and the containerd image-store - is used. - type: "string" - default: "moby" - example: "moby" - Plugins: - description: | - The default containerd namespace used for plugins managed by - the daemon. - - The default namespace for plugins is "plugins.moby", but will be - suffixed with the `.` of the remapped `root` if - user-namespaces are enabled and the containerd image-store - is used. - type: "string" - default: "plugins.moby" - example: "plugins.moby" - - FirewallInfo: - description: | - Information about the daemon's firewalling configuration. 
- - This field is currently only used on Linux, and omitted on other platforms. - type: "object" - x-nullable: true - properties: - Driver: - description: | - The name of the firewall backend driver. - type: "string" - example: "nftables" - Info: - description: | - Information about the firewall backend, provided as - "label" / "value" pairs. - -
- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["ReloadedAt", "2025-01-01T00:00:00Z"] - - # PluginsInfo is a temp struct holding Plugins name - # registered with docker daemon. It is used by Info struct - PluginsInfo: - description: | - Available plugins per type. - -
- - > **Note**: Only unmanaged (V1) plugins are included in this list. - > V1 plugins are "lazily" loaded, and are not returned in this list - > if there is no resource using the plugin. - type: "object" - properties: - Volume: - description: "Names of available volume-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["local"] - Network: - description: "Names of available network-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] - Authorization: - description: "Names of available authorization plugins." - type: "array" - items: - type: "string" - example: ["img-authz-plugin", "hbm"] - Log: - description: "Names of available logging-drivers, and logging-driver plugins." - type: "array" - items: - type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] - - - RegistryServiceConfig: - description: | - RegistryServiceConfig stores daemon registry services configuration. - type: "object" - x-nullable: true - properties: - InsecureRegistryCIDRs: - description: | - List of IP ranges of insecure registries, using the CIDR syntax - ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries - accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates - from unknown CAs) communication. - - By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as - insecure. All other registries are secure. Communicating with an - insecure registry is not possible if the daemon assumes that registry - is secure. - - This configuration override this behavior, insecure communication with - registries whose resolved IP address is within the subnet described by - the CIDR syntax. - - Registries can also be marked insecure by hostname. Those registries - are listed under `IndexConfigs` and have their `Secure` field set to - `false`. - - > **Warning**: Using this option can be useful when running a local - > registry, but introduces security vulnerabilities. This option - > should therefore ONLY be used for testing purposes. For increased - > security, users should add their CA to their system's list of trusted - > CAs instead of enabling this option. - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - IndexConfigs: - type: "object" - additionalProperties: - $ref: "#/definitions/IndexInfo" - example: - "127.0.0.1:5000": - "Name": "127.0.0.1:5000" - "Mirrors": [] - "Secure": false - "Official": false - "[2001:db8:a0b:12f0::1]:80": - "Name": "[2001:db8:a0b:12f0::1]:80" - "Mirrors": [] - "Secure": false - "Official": false - "docker.io": - Name: "docker.io" - Mirrors: ["https://hub-mirror.corp.example.com:5000/"] - Secure: true - Official: true - "registry.internal.corp.example.com:3000": - Name: "registry.internal.corp.example.com:3000" - Mirrors: [] - Secure: false - Official: false - Mirrors: - description: | - List of registry URLs that act as a mirror for the official - (`docker.io`) registry. - - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://[2001:db8:a0b:12f0::1]/" - - IndexInfo: - description: - IndexInfo contains information about a registry. - type: "object" - x-nullable: true - properties: - Name: - description: | - Name of the registry, such as "docker.io". - type: "string" - example: "docker.io" - Mirrors: - description: | - List of mirrors, expressed as URIs. 
- type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://registry-2.docker.io/" - - "https://registry-3.docker.io/" - Secure: - description: | - Indicates if the registry is part of the list of insecure - registries. - - If `false`, the registry is insecure. Insecure registries accept - un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from - unknown CAs) communication. - - > **Warning**: Insecure registries can be useful when running a local - > registry. However, because its use creates security vulnerabilities - > it should ONLY be enabled for testing purposes. For increased - > security, users should add their CA to their system's list of - > trusted CAs instead of enabling this option. - type: "boolean" - example: true - Official: - description: | - Indicates whether this is an official registry (i.e., Docker Hub / docker.io) - type: "boolean" - example: true - - Runtime: - description: | - Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtime. - - The runtime is invoked by the daemon via the `containerd` daemon. OCI - runtimes act as an interface to the Linux kernel namespaces, cgroups, - and SELinux. - type: "object" - properties: - path: - description: | - Name and, optional, path, of the OCI executable binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "/usr/local/bin/my-oci-runtime" - runtimeArgs: - description: | - List of command-line arguments to pass to the runtime when invoked. - type: "array" - x-nullable: true - items: - type: "string" - example: ["--debug", "--systemd-cgroup=false"] - status: - description: | - Information specific to the runtime. - - While this API specification does not define data provided by runtimes, - the following well-known properties may be provided by runtimes: - - `org.opencontainers.runtime-spec.features`: features structure as defined - in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), - in a JSON string representation. - -
- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" - - Commit: - description: | - Commit holds the Git-commit (SHA1) that a binary was built from, as - reported in the version-string of external tools, such as `containerd`, - or `runC`. - type: "object" - properties: - ID: - description: "Actual commit ID of external tool." - type: "string" - example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - - SwarmInfo: - description: | - Represents generic information about swarm. - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - default: "" - example: "k67qz4598weg5unwwffg6z1m1" - NodeAddr: - description: | - IP address at which this node can be reached by other nodes in the - swarm. - type: "string" - default: "" - example: "10.0.0.46" - LocalNodeState: - $ref: "#/definitions/LocalNodeState" - ControlAvailable: - type: "boolean" - default: false - example: true - Error: - type: "string" - default: "" - RemoteManagers: - description: | - List of ID's and addresses of other managers in the swarm. - type: "array" - default: null - x-nullable: true - items: - $ref: "#/definitions/PeerNode" - example: - - NodeID: "71izy0goik036k48jg985xnds" - Addr: "10.0.0.158:2377" - - NodeID: "79y6h1o4gv8n120drcprv5nmc" - Addr: "10.0.0.159:2377" - - NodeID: "k67qz4598weg5unwwffg6z1m1" - Addr: "10.0.0.46:2377" - Nodes: - description: "Total number of nodes in the swarm." - type: "integer" - x-nullable: true - example: 4 - Managers: - description: "Total number of managers in the swarm." - type: "integer" - x-nullable: true - example: 3 - Cluster: - $ref: "#/definitions/ClusterInfo" - - LocalNodeState: - description: "Current local status of this node." - type: "string" - default: "" - enum: - - "" - - "inactive" - - "pending" - - "active" - - "error" - - "locked" - example: "active" - - PeerNode: - description: "Represents a peer-node in the swarm" - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - Addr: - description: | - IP address and ports at which this node can be reached. - type: "string" - - NetworkAttachmentConfig: - description: | - Specifies how a service should be attached to a particular network. - type: "object" - properties: - Target: - description: | - The target network for attachment. Must be a network name or ID. - type: "string" - Aliases: - description: | - Discoverable alternate names for the service on this network. - type: "array" - items: - type: "string" - DriverOpts: - description: | - Driver attachment options for the network target. - type: "object" - additionalProperties: - type: "string" - - EventActor: - description: | - Actor describes something that generates events, like a container, network, - or a volume. - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - description: | - Various key/value attributes of the object, depending on its type. 
- type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-label-value" - image: "alpine:latest" - name: "my-container" - - EventMessage: - description: | - EventMessage represents the information an event contains. - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] - example: "container" - Action: - description: "The type of event" - type: "string" - example: "create" - Actor: - $ref: "#/definitions/EventActor" - scope: - description: | - Scope of the event. Engine events are `local` scope. Cluster (Swarm) - events are `swarm` scope. - type: "string" - enum: ["local", "swarm"] - time: - description: "Timestamp of event" - type: "integer" - format: "int64" - example: 1629574695 - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - example: 1629574695515050031 - - OCIDescriptor: - type: "object" - x-go-name: Descriptor - description: | - A descriptor struct containing digest, media type, and size, as defined in - the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). - properties: - mediaType: - description: | - The media type of the object this schema refers to. - type: "string" - example: "application/vnd.oci.image.manifest.v1+json" - digest: - description: | - The digest of the targeted content. - type: "string" - example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - size: - description: | - The size in bytes of the blob. - type: "integer" - format: "int64" - example: 424 - urls: - description: |- - List of URLs from which this object MAY be downloaded. - type: "array" - items: - type: "string" - format: "uri" - x-nullable: true - annotations: - description: |- - Arbitrary metadata relating to the targeted content. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - "com.docker.official-images.bashbrew.arch": "amd64" - "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" - "org.opencontainers.image.base.name": "scratch" - "org.opencontainers.image.created": "2025-01-27T00:00:00Z" - "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" - "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" - "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" - "org.opencontainers.image.version": "24.04" - data: - type: string - x-nullable: true - description: |- - Data is an embedding of the targeted content. This is encoded as a base64 - string when marshalled to JSON (automatically, by encoding/json). If - present, Data can be used directly to avoid fetching the targeted content. - example: null - platform: - $ref: "#/definitions/OCIPlatform" - artifactType: - description: |- - ArtifactType is the IANA media type of this artifact. - type: "string" - x-nullable: true - example: null - - OCIPlatform: - type: "object" - x-go-name: Platform - x-nullable: true - description: | - Describes the platform which the image in the manifest runs on, as defined - in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). 
- properties: - architecture: - description: | - The CPU architecture, for example `amd64` or `ppc64`. - type: "string" - example: "arm" - os: - description: | - The operating system, for example `linux` or `windows`. - type: "string" - example: "windows" - os.version: - description: | - Optional field specifying the operating system version, for example on - Windows `10.0.19041.1165`. - type: "string" - example: "10.0.19041.1165" - os.features: - description: | - Optional field specifying an array of strings, each listing a required - OS feature (for example on Windows `win32k`). - type: "array" - items: - type: "string" - example: - - "win32k" - variant: - description: | - Optional field specifying a variant of the CPU, for example `v7` to - specify ARMv7 when architecture is `arm`. - type: "string" - example: "v7" - - DistributionInspect: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - description: | - Describes the result obtained from contacting the registry to retrieve - image metadata. - properties: - Descriptor: - $ref: "#/definitions/OCIDescriptor" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. - items: - $ref: "#/definitions/OCIPlatform" - - ClusterVolume: - type: "object" - description: | - Options and information specific to, and only present on, Swarm CSI - cluster volumes. - properties: - ID: - type: "string" - description: | - The Swarm ID of this volume. Because cluster volumes are Swarm - objects, they have an ID, unlike non-cluster volumes. This ID can - be used to refer to the Volume instead of the name. - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - Info: - type: "object" - description: | - Information about the global status of the volume. - properties: - CapacityBytes: - type: "integer" - format: "int64" - description: | - The capacity of the volume in bytes. A value of 0 indicates that - the capacity is unknown. - VolumeContext: - type: "object" - description: | - A map of strings to strings returned from the storage plugin when - the volume is created. - additionalProperties: - type: "string" - VolumeID: - type: "string" - description: | - The ID of the volume as returned by the CSI storage plugin. This - is distinct from the volume's ID as provided by Docker. This ID - is never used by the user when communicating with Docker to refer - to this volume. If the ID is blank, then the Volume has not been - successfully created in the plugin yet. - AccessibleTopology: - type: "array" - description: | - The topology this volume is actually accessible from. - items: - $ref: "#/definitions/Topology" - PublishStatus: - type: "array" - description: | - The status of the volume as it pertains to its publishing and use on - specific nodes - items: - type: "object" - properties: - NodeID: - type: "string" - description: | - The ID of the Swarm node the volume is published on. - State: - type: "string" - description: | - The published state of the volume. - * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. - * `published` The volume is published successfully to the node. 
- * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. - * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. - enum: - - "pending-publish" - - "published" - - "pending-node-unpublish" - - "pending-controller-unpublish" - PublishContext: - type: "object" - description: | - A map of strings to strings returned by the CSI controller - plugin when a volume is published. - additionalProperties: - type: "string" - - ClusterVolumeSpec: - type: "object" - description: | - Cluster-specific options used to create the volume. - properties: - Group: - type: "string" - description: | - Group defines the volume group of this volume. Volumes belonging to - the same group can be referred to by group name when creating - Services. Referring to a volume by group instructs Swarm to treat - volumes in that group interchangeably for the purpose of scheduling. - Volumes with an empty string for a group technically all belong to - the same, emptystring group. - AccessMode: - type: "object" - description: | - Defines how the volume is used by tasks. - properties: - Scope: - type: "string" - description: | - The set of nodes this volume can be used on at one time. - - `single` The volume may only be scheduled to one node at a time. - - `multi` the volume may be scheduled to any supported number of nodes at a time. - default: "single" - enum: ["single", "multi"] - x-nullable: false - Sharing: - type: "string" - description: | - The number and way that different tasks can use this volume - at one time. - - `none` The volume may only be used by one task at a time. - - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly - - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. - - `all` The volume may have any number of readers and writers. - default: "none" - enum: ["none", "readonly", "onewriter", "all"] - x-nullable: false - MountVolume: - type: "object" - description: | - Options for using this volume as a Mount-type volume. - - Either MountVolume or BlockVolume, but not both, must be - present. - properties: - FsType: - type: "string" - description: | - Specifies the filesystem type for the mount volume. - Optional. - MountFlags: - type: "array" - description: | - Flags to pass when mounting the volume. Optional. - items: - type: "string" - BlockVolume: - type: "object" - description: | - Options for using this volume as a Block-type volume. - Intentionally empty. - Secrets: - type: "array" - description: | - Swarm Secrets that are passed to the CSI storage plugin when - operating on this volume. - items: - type: "object" - description: | - One cluster volume secret entry. Defines a key-value pair that - is passed to the plugin. - properties: - Key: - type: "string" - description: | - Key is the name of the key of the key-value pair passed to - the plugin. - Secret: - type: "string" - description: | - Secret is the swarm Secret object from which to read data. - This can be a Secret name or ID. The Secret data is - retrieved by swarm and used as the value of the key-value - pair passed to the plugin. - AccessibilityRequirements: - type: "object" - description: | - Requirements for the accessible topology of the volume. These - fields are optional. 
For an in-depth description of what these - fields mean, see the CSI specification. - properties: - Requisite: - type: "array" - description: | - A list of required topologies, at least one of which the - volume must be accessible from. - items: - $ref: "#/definitions/Topology" - Preferred: - type: "array" - description: | - A list of topologies that the volume should attempt to be - provisioned in. - items: - $ref: "#/definitions/Topology" - CapacityRange: - type: "object" - description: | - The desired capacity that the volume should be created with. If - empty, the plugin will decide the capacity. - properties: - RequiredBytes: - type: "integer" - format: "int64" - description: | - The volume must be at least this big. The value of 0 - indicates an unspecified minimum - LimitBytes: - type: "integer" - format: "int64" - description: | - The volume must not be bigger than this. The value of 0 - indicates an unspecified maximum. - Availability: - type: "string" - description: | - The availability of the volume for use in tasks. - - `active` The volume is fully available for scheduling on the cluster - - `pause` No new workloads should use the volume, but existing workloads are not stopped. - - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. - default: "active" - x-nullable: false - enum: - - "active" - - "pause" - - "drain" - - Topology: - description: | - A map of topological domains to topological segments. For in depth - details, see documentation for the Topology object in the CSI - specification. - type: "object" - additionalProperties: - type: "string" - - ImageManifestSummary: - x-go-name: "ManifestSummary" - description: | - ImageManifestSummary represents a summary of an image manifest. - type: "object" - required: ["ID", "Descriptor", "Available", "Size", "Kind"] - properties: - ID: - description: | - ID is the content-addressable ID of an image and is the same as the - digest of the image manifest. - type: "string" - example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" - Descriptor: - $ref: "#/definitions/OCIDescriptor" - Available: - description: Indicates whether all the child content (image config, layers) is fully available locally. - type: "boolean" - example: true - Size: - type: "object" - x-nullable: false - required: ["Content", "Total"] - properties: - Total: - type: "integer" - format: "int64" - example: 8213251 - description: | - Total is the total size (in bytes) of all the locally present - data (both distributable and non-distributable) that's related to - this manifest and its children. - This equal to the sum of [Content] size AND all the sizes in the - [Size] struct present in the Kind-specific data struct. - For example, for an image kind (Kind == "image") - this would include the size of the image content and unpacked - image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). - Content: - description: | - Content is the size (in bytes) of all the locally present - content in the content store (e.g. image config, layers) - referenced by this manifest and its children. - This only includes blobs in the content store. - type: "integer" - format: "int64" - example: 3987495 - Kind: - type: "string" - example: "image" - enum: - - "image" - - "attestation" - - "unknown" - description: | - The kind of the manifest. 
- - kind | description - -------------|----------------------------------------------------------- - image | Image manifest that can be used to start a container. - attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. - ImageData: - description: | - The image data for the image manifest. - This field is only populated when Kind is "image". - type: "object" - x-nullable: true - x-omitempty: true - required: ["Platform", "Containers", "Size", "UnpackedSize"] - properties: - Platform: - $ref: "#/definitions/OCIPlatform" - description: | - OCI platform of the image. This will be the platform specified in the - manifest descriptor from the index/manifest list. - If it's not available, it will be obtained from the image config. - Containers: - description: | - The IDs of the containers that are using this image. - type: "array" - items: - type: "string" - example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] - Size: - type: "object" - x-nullable: false - required: ["Unpacked"] - properties: - Unpacked: - type: "integer" - format: "int64" - example: 3987495 - description: | - Unpacked is the size (in bytes) of the locally unpacked - (uncompressed) image content that's directly usable by the containers - running this image. - It's independent of the distributable content - e.g. - the image might still have an unpacked data that's still used by - some container even when the distributable/compressed content is - already gone. - AttestationData: - description: | - The image data for the attestation manifest. - This field is only populated when Kind is "attestation". - type: "object" - x-nullable: true - x-omitempty: true - required: ["For"] - properties: - For: - description: | - The digest of the image manifest that this attestation is for. - type: "string" - example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" - -paths: - /containers/json: - get: - summary: "List containers" - description: | - Returns a list of containers. For details on the format, see the - [inspect endpoint](#operation/ContainerInspect). - - Note that it uses a different, smaller representation of a container - than inspecting a single container. For example, the list of linked - containers is not propagated . - operationId: "ContainerList" - produces: - - "application/json" - parameters: - - name: "all" - in: "query" - description: | - Return all containers. By default, only running containers are shown. - type: "boolean" - default: false - - name: "limit" - in: "query" - description: | - Return this number of most recently created containers, including - non-running ones. - type: "integer" - - name: "size" - in: "query" - description: | - Return the size of container as fields `SizeRw` and `SizeRootFs`. - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - Filters to process on the container list, encoded as JSON (a - `map[string][]string`). For example, `{"status": ["paused"]}` will - only return paused containers. 
- - Available filters: - - - `ancestor`=(`[:]`, ``, or ``) - - `before`=(`` or ``) - - `expose`=(`[/]`|`/[]`) - - `exited=` containers with exit code of `` - - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - - `id=` a container's ID - - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - - `is-task=`(`true`|`false`) - - `label=key` or `label="key=value"` of a container label - - `name=` a container's name - - `network`=(`` or ``) - - `publish`=(`[/]`|`/[]`) - - `since`=(`` or ``) - - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - - `volume`=(`` or ``) - type: "string" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/create: - post: - summary: "Create a container" - operationId: "ContainerCreate" - consumes: - - "application/json" - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "name" - in: "query" - description: | - Assign the specified name to the container. Must match - `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. - type: "string" - pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - - name: "platform" - in: "query" - description: | - Platform in the format `os[/arch[/variant]]` used for image lookup. - - When specified, the daemon checks if the requested image is present - in the local image cache with the given OS and Architecture, and - otherwise returns a `404` status. - - If the option is not set, the host's native OS and Architecture are - used to look up the image in the image cache. However, if no platform - is passed and the given image does exist in the local image cache, - but its OS or architecture does not match, the container is created - with the available image, and a warning is added to the `Warnings` - field in the response, for example; - - WARNING: The requested image's platform (linux/arm64/v8) does not - match the detected host platform (linux/amd64) and no - specific platform was requested - - type: "string" - default: "" - - name: "body" - in: "body" - description: "Container to create" - schema: - allOf: - - $ref: "#/definitions/ContainerConfig" - - type: "object" - properties: - HostConfig: - $ref: "#/definitions/HostConfig" - NetworkingConfig: - $ref: "#/definitions/NetworkingConfig" - example: - Hostname: "" - Domainname: "" - User: "" - AttachStdin: false - AttachStdout: true - AttachStderr: true - Tty: false - OpenStdin: false - StdinOnce: false - Env: - - "FOO=bar" - - "BAZ=quux" - Cmd: - - "date" - Entrypoint: "" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - Volumes: - /volumes/data: {} - WorkingDir: "" - NetworkDisabled: false - MacAddress: "12:34:56:78:9a:bc" - ExposedPorts: - 22/tcp: {} - StopSignal: "SIGTERM" - StopTimeout: 10 - HostConfig: - Binds: - - "/tmp:/tmp" - Links: - - "redis3:redis" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - NanoCpus: 500000 - CpuPercent: 80 - CpuShares: 512 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpuQuota: 50000 - CpusetCpus: "0,1" - CpusetMems: "0,1" - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 300 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceWriteIOps: - - {} - 
DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - MemorySwappiness: 60 - OomKillDisable: false - OomScoreAdj: 500 - PidMode: "" - PidsLimit: 0 - PortBindings: - 22/tcp: - - HostPort: "11022" - PublishAllPorts: false - Privileged: false - ReadonlyRootfs: false - Dns: - - "8.8.8.8" - DnsOptions: - - "" - DnsSearch: - - "" - VolumesFrom: - - "parent" - - "other:ro" - CapAdd: - - "NET_ADMIN" - CapDrop: - - "MKNOD" - GroupAdd: - - "newgroup" - RestartPolicy: - Name: "" - MaximumRetryCount: 0 - AutoRemove: true - NetworkMode: "bridge" - Devices: [] - Ulimits: - - {} - LogConfig: - Type: "json-file" - Config: {} - SecurityOpt: [] - StorageOpt: {} - CgroupParent: "" - VolumeDriver: "" - ShmSize: 67108864 - NetworkingConfig: - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - database_nw: {} - - required: true - responses: - 201: - description: "Container created successfully" - schema: - $ref: "#/definitions/ContainerCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/{id}/json: - get: - summary: "Inspect a container" - description: "Return low-level information about a container." - operationId: "ContainerInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ContainerInspectResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "size" - in: "query" - type: "boolean" - default: false - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" - tags: ["Container"] - /containers/{id}/top: - get: - summary: "List processes running inside a container" - description: | - On Unix systems, this is done by running the `ps` command. This endpoint - is not supported on Windows. - operationId: "ContainerTop" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ContainerTopResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "ps_args" - in: "query" - description: "The arguments to pass to `ps`. 
For example, `aux`" - type: "string" - default: "-ef" - tags: ["Container"] - /containers/{id}/logs: - get: - summary: "Get container logs" - description: | - Get `stdout` and `stderr` logs from a container. - - Note: This endpoint works only for containers with the `json-file` or - `journald` logging driver. - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ContainerLogs" - responses: - 200: - description: | - logs returned as a stream in response body. - For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - Note that unlike the attach endpoint, the logs endpoint does not - upgrade the connection and does not set Content-Type. - schema: - type: "string" - format: "binary" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "until" - in: "query" - description: "Only return logs before this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Container"] - /containers/{id}/changes: - get: - summary: "Get changes on a container’s filesystem" - description: | - Returns which files in a container's filesystem have been added, deleted, - or modified. The `Kind` of modification can be one of: - - - `0`: Modified ("C") - - `1`: Added ("A") - - `2`: Deleted ("D") - operationId: "ContainerChanges" - produces: ["application/json"] - responses: - 200: - description: "The list of changes" - schema: - type: "array" - items: - $ref: "#/definitions/FilesystemChange" - examples: - application/json: - - Path: "/dev" - Kind: 0 - - Path: "/dev/kmsg" - Kind: 1 - - Path: "/test" - Kind: 1 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/export: - get: - summary: "Export a container" - description: "Export the contents of a container as a tarball." 
- operationId: "ContainerExport" - produces: - - "application/octet-stream" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/stats: - get: - summary: "Get container stats based on resource usage" - description: | - This endpoint returns a live stream of a container’s resource usage - statistics. - - The `precpu_stats` is the CPU statistic of the *previous* read, and is - used to calculate the CPU usage percentage. It is not an exact copy - of the `cpu_stats` field. - - If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is - nil then for compatibility with older daemons the length of the - corresponding `cpu_usage.percpu_usage` array should be used. - - On a cgroup v2 host, the following fields are not set - * `blkio_stats`: all fields other than `io_service_bytes_recursive` - * `cpu_stats`: `cpu_usage.percpu_usage` - * `memory_stats`: `max_usage` and `failcnt` - Also, `memory_stats.stats` fields are incompatible with cgroup v1. - - To calculate the values shown by the `stats` command of the docker cli tool - the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` - * available_memory = `memory_stats.limit` - * Memory usage % = `(used_memory / available_memory) * 100.0` - * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` - * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` - * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` - * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` - operationId: "ContainerStats" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ContainerStatsResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "stream" - in: "query" - description: | - Stream the output. If false, the stats will be output once and then - it will disconnect. - type: "boolean" - default: true - - name: "one-shot" - in: "query" - description: | - Only get a single stat instead of waiting for 2 cycles. Must be used - with `stream=false`. - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/resize: - post: - summary: "Resize a container TTY" - description: "Resize the TTY for a container." 
- operationId: "ContainerResize" - consumes: - - "application/octet-stream" - produces: - - "text/plain" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "cannot resize container" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "h" - in: "query" - required: true - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - required: true - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Container"] - /containers/{id}/start: - post: - summary: "Start a container" - operationId: "ContainerStart" - responses: - 204: - description: "no error" - 304: - description: "container already started" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container. Format is a - single character `[a-Z]` or `ctrl-` where `` is one - of: `a-z`, `@`, `^`, `[`, `,` or `_`. - type: "string" - tags: ["Container"] - /containers/{id}/stop: - post: - summary: "Stop a container" - operationId: "ContainerStop" - responses: - 204: - description: "no error" - 304: - description: "container already stopped" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/restart: - post: - summary: "Restart a container" - operationId: "ContainerRestart" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/kill: - post: - summary: "Kill a container" - description: | - Send a POSIX signal to a container, defaulting to killing to the - container. 
- operationId: "ContainerKill" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is not running" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - default: "SIGKILL" - tags: ["Container"] - /containers/{id}/update: - post: - summary: "Update a container" - description: | - Change various configuration options of a container without having to - recreate it. - operationId: "ContainerUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "The container has been updated." - schema: - $ref: "#/definitions/ContainerUpdateResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "update" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - example: - BlkioWeight: 300 - CpuShares: 512 - CpuPeriod: 100000 - CpuQuota: 50000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpusetCpus: "0,1" - CpusetMems: "0" - Memory: 314572800 - MemorySwap: 514288000 - MemoryReservation: 209715200 - RestartPolicy: - MaximumRetryCount: 4 - Name: "on-failure" - tags: ["Container"] - /containers/{id}/rename: - post: - summary: "Rename a container" - operationId: "ContainerRename" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "name already in use" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "name" - in: "query" - required: true - description: "New name for the container" - type: "string" - tags: ["Container"] - /containers/{id}/pause: - post: - summary: "Pause a container" - description: | - Use the freezer cgroup to suspend all processes in a container. - - Traditionally, when suspending a process the `SIGSTOP` signal is used, - which is observable by the process being suspended. With the freezer - cgroup the process is unaware, and unable to capture, that it is being - suspended, and subsequently resumed. 
- operationId: "ContainerPause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/unpause: - post: - summary: "Unpause a container" - description: "Resume a container which has been paused." - operationId: "ContainerUnpause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/attach: - post: - summary: "Attach to a container" - description: | - Attach to a container to read its output or send it input. You can attach - to the same container multiple times and you can reattach to containers - that have been detached. - - Either the `stream` or `logs` parameter must be `true` for this endpoint - to do anything. - - See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) - for more details. - - ### Hijacking - - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, - and `stderr` on the same socket. - - This is the response from the daemon for an attach request: - - ``` - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - [STREAM] - ``` - - After the headers and two new lines, the TCP connection can now be used - for raw, bidirectional communication between the client and server. - - To hint potential proxies about connection hijacking, the Docker client - can also optionally send connection upgrade headers. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 - Upgrade: tcp - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response, and will - similarly follow with the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Content-Type: application/vnd.docker.raw-stream - Connection: Upgrade - Upgrade: tcp - - [STREAM] - ``` - - ### Stream format - - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream - and the stream over the hijacked connected is multiplexed to separate out - `stdout` and `stderr`. The stream consists of a series of frames, each - containing a header and a payload. - - The header contains the information which the stream writes (`stdout` or - `stderr`). It also contains the size of the associated frame encoded in - the last four bytes (`uint32`). - - It is encoded on the first eight bytes like this: - - ```go - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - ``` - - `STREAM_TYPE` can be: - - - 0: `stdin` (is written on `stdout`) - - 1: `stdout` - - 2: `stderr` - - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size - encoded as big endian. 
- - Following the header is the payload, which is the specified number of - bytes of `STREAM_TYPE`. - - The simplest way to implement this protocol is the following: - - 1. Read 8 bytes. - 2. Choose `stdout` or `stderr` depending on the first byte. - 3. Extract the frame size from the last four bytes. - 4. Read the extracted size and output it on the correct output. - 5. Goto 1. - - ### Stream format when using a TTY - - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream is not multiplexed. The data exchanged over the hijacked - connection is simply the raw data from the process PTY and client's - `stdin`. - - operationId: "ContainerAttach" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,` or `_`. - type: "string" - - name: "logs" - in: "query" - description: | - Replay previous logs from the container. - - This is useful for attaching to a container that has started and you - want to output everything since the container started. - - If `stream` is also enabled, once all the previous output has been - returned, it will seamlessly transition into streaming current - output. - type: "boolean" - default: false - - name: "stream" - in: "query" - description: | - Stream attached streams from the time the request was made onwards. - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/attach/ws: - get: - summary: "Attach to a container via a websocket" - operationId: "ContainerAttachWebsocket" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,`, or `_`. 
- type: "string" - - name: "logs" - in: "query" - description: "Return logs" - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Return stream" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/wait: - post: - summary: "Wait for a container" - description: "Block until a container stops, then returns the exit code." - operationId: "ContainerWait" - produces: ["application/json"] - responses: - 200: - description: "The container has exit." - schema: - $ref: "#/definitions/ContainerWaitResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "condition" - in: "query" - description: | - Wait until a container state reaches the given condition. - - Defaults to `not-running` if omitted or empty. - type: "string" - enum: - - "not-running" - - "next-exit" - - "removed" - default: "not-running" - tags: ["Container"] - /containers/{id}: - delete: - summary: "Remove a container" - operationId: "ContainerDelete" - responses: - 204: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: | - You cannot remove a running container: c2ada9df5af8. Stop the - container before attempting removal or force remove - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "v" - in: "query" - description: "Remove anonymous volumes associated with the container." - type: "boolean" - default: false - - name: "force" - in: "query" - description: "If the container is running, kill it before removing it." - type: "boolean" - default: false - - name: "link" - in: "query" - description: "Remove the specified link associated with the container." - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/archive: - head: - summary: "Get information about files in a container" - description: | - A response header `X-Docker-Container-Path-Stat` is returned, containing - a base64 - encoded JSON object with some filesystem header information - about the path. 
- operationId: "ContainerArchiveInfo" - responses: - 200: - description: "no error" - headers: - X-Docker-Container-Path-Stat: - type: "string" - description: | - A base64 - encoded JSON object with some filesystem header - information about the path - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - get: - summary: "Get an archive of a filesystem resource in a container" - description: "Get a tar archive of a resource in the filesystem of container id." - operationId: "ContainerArchive" - produces: ["application/x-tar"] - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - put: - summary: "Extract an archive of files or folders to a directory in a container" - description: | - Upload a tar archive to be extracted to a path in the filesystem of container id. - `path` parameter is asserted to be a directory. If it exists as a file, 400 error - will be returned with message "not a directory". - operationId: "PutContainerArchive" - consumes: ["application/x-tar", "application/octet-stream"] - responses: - 200: - description: "The content was extracted successfully" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "not a directory" - 403: - description: "Permission denied, the volume or container rootfs is marked as read-only." - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such container or path does not exist inside the container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Path to a directory in the container to extract the archive’s contents into. " - type: "string" - - name: "noOverwriteDirNonDir" - in: "query" - description: | - If `1`, `true`, or `True` then it will be an error if unpacking the - given content would cause an existing directory to be replaced with - a non-directory and vice versa. 
- type: "string" - - name: "copyUIDGID" - in: "query" - description: | - If `1`, `true`, then it will copy UID/GID maps to the dest file or - dir - type: "string" - - name: "inputStream" - in: "body" - required: true - description: | - The input stream must be a tar archive compressed with one of the - following algorithms: `identity` (no compression), `gzip`, `bzip2`, - or `xz`. - schema: - type: "string" - format: "binary" - tags: ["Container"] - /containers/prune: - post: - summary: "Delete stopped containers" - produces: - - "application/json" - operationId: "ContainerPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ContainerPruneResponse" - properties: - ContainersDeleted: - description: "Container IDs that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /images/json: - get: - summary: "List Images" - description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." - operationId: "ImageList" - produces: - - "application/json" - responses: - 200: - description: "Summary image data for the images matching the query" - schema: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "all" - in: "query" - description: "Show all images. Only images from a final layer (no children) are shown by default." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the images list. - - Available filters: - - - `before`=(`[:]`, `` or ``) - - `dangling=true` - - `label=key` or `label="key=value"` of an image label - - `reference`=(`[:]`) - - `since`=(`[:]`, `` or ``) - - `until=` - type: "string" - - name: "shared-size" - in: "query" - description: "Compute and show shared size as a `SharedSize` field on each image." - type: "boolean" - default: false - - name: "digests" - in: "query" - description: "Show digest information as a `RepoDigests` field on each image." - type: "boolean" - default: false - - name: "manifests" - in: "query" - description: "Include `Manifests` in the image summary." - type: "boolean" - default: false - tags: ["Image"] - /build: - post: - summary: "Build an image" - description: | - Build an image from a tar archive with a `Dockerfile` in it. - - The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). 
- - The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. - - The build is canceled if the client drops the connection by quitting or being killed. - operationId: "ImageBuild" - consumes: - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "inputStream" - in: "body" - description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - format: "binary" - - name: "dockerfile" - in: "query" - description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." - type: "string" - default: "Dockerfile" - - name: "t" - in: "query" - description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." - type: "string" - - name: "extrahosts" - in: "query" - description: "Extra hosts to add to /etc/hosts" - type: "string" - - name: "remote" - in: "query" - description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." - type: "string" - - name: "q" - in: "query" - description: "Suppress verbose build output." - type: "boolean" - default: false - - name: "nocache" - in: "query" - description: "Do not use the cache when building the image." - type: "boolean" - default: false - - name: "cachefrom" - in: "query" - description: "JSON array of images used for build cache resolution." - type: "string" - - name: "pull" - in: "query" - description: "Attempt to pull the image even if an older image exists locally." - type: "string" - - name: "rm" - in: "query" - description: "Remove intermediate containers after a successful build." - type: "boolean" - default: true - - name: "forcerm" - in: "query" - description: "Always remove intermediate containers, even upon failure." - type: "boolean" - default: false - - name: "memory" - in: "query" - description: "Set memory limit for build." - type: "integer" - - name: "memswap" - in: "query" - description: "Total memory (memory + swap). Set as `-1` to disable swap." - type: "integer" - - name: "cpushares" - in: "query" - description: "CPU shares (relative weight)." - type: "integer" - - name: "cpusetcpus" - in: "query" - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." - type: "string" - - name: "cpuperiod" - in: "query" - description: "The length of a CPU period in microseconds." - type: "integer" - - name: "cpuquota" - in: "query" - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - - name: "buildargs" - in: "query" - description: > - JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker - uses the buildargs as the environment context for commands run via the `Dockerfile` RUN - instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for - passing secret values. - - - For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the - query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. - - - [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) - type: "string" - - name: "shmsize" - in: "query" - description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." - type: "integer" - - name: "squash" - in: "query" - description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" - type: "boolean" - - name: "labels" - in: "query" - description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." - type: "string" - - name: "networkmode" - in: "query" - description: | - Sets the networking mode for the run commands during build. Supported - standard values are: `bridge`, `host`, `none`, and `container:`. - Any other value is taken as a custom network's name or ID to which this - container should connect to. - type: "string" - - name: "Content-type" - in: "header" - type: "string" - enum: - - "application/x-tar" - default: "application/x-tar" - - name: "X-Registry-Config" - in: "header" - description: | - This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. - - The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: - - ``` - { - "docker.example.com": { - "username": "janedoe", - "password": "hunter2" - }, - "https://index.docker.io/v1/": { - "username": "mobydock", - "password": "conta1n3rize14" - } - } - ``` - - Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. - type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - - name: "target" - in: "query" - description: "Target build stage" - type: "string" - default: "" - - name: "outputs" - in: "query" - description: "BuildKit output configuration" - type: "string" - default: "" - - name: "version" - in: "query" - type: "string" - default: "1" - enum: ["1", "2"] - description: | - Version of the builder backend to use. - - - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) - - `2` is [BuildKit](https://github.com/moby/buildkit) - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /build/prune: - post: - summary: "Delete builder cache" - produces: - - "application/json" - operationId: "BuildPrune" - parameters: - - name: "keep-storage" - in: "query" - description: | - Amount of disk space in bytes to keep for cache - - > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". - > It is kept for backward compatibility and will be removed in API v1.49. 
- type: "integer" - format: "int64" - - name: "reserved-space" - in: "query" - description: "Amount of disk space in bytes to keep for cache" - type: "integer" - format: "int64" - - name: "max-used-space" - in: "query" - description: "Maximum amount of disk space allowed to keep for cache" - type: "integer" - format: "int64" - - name: "min-free-space" - in: "query" - description: "Target amount of free disk space after pruning" - type: "integer" - format: "int64" - - name: "all" - in: "query" - type: "boolean" - description: "Remove all types of build cache" - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the list of build cache objects. - - Available filters: - - - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. - - `id=` - - `parent=` - - `type=` - - `description=` - - `inuse` - - `shared` - - `private` - responses: - 200: - description: "No error" - schema: - type: "object" - title: "BuildPruneResponse" - properties: - CachesDeleted: - type: "array" - items: - description: "ID of build cache object" - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /images/create: - post: - summary: "Create an image" - description: "Pull or import an image." - operationId: "ImageCreate" - consumes: - - "text/plain" - - "application/octet-stream" - produces: - - "application/json" - responses: - 200: - description: "no error" - 404: - description: "repository does not exist or no read access" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "fromImage" - in: "query" - description: | - Name of the image to pull. If the name includes a tag or digest, specific behavior applies: - - - If only `fromImage` includes a tag, that tag is used. - - If both `fromImage` and `tag` are provided, `tag` takes precedence. - - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. - - If neither a tag nor digest is specified, all tags are pulled. - type: "string" - - name: "fromSrc" - in: "query" - description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." - type: "string" - - name: "repo" - in: "query" - description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." - type: "string" - - name: "tag" - in: "query" - description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." - type: "string" - - name: "message" - in: "query" - description: "Set commit message for imported image." - type: "string" - - name: "inputImage" - in: "body" - description: "Image content if the value `-` has been specified in fromSrc query parameter" - schema: - type: "string" - required: false - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. 
- type: "string" - - name: "changes" - in: "query" - description: | - Apply `Dockerfile` instructions to the image that is created, - for example: `changes=ENV DEBUG=true`. - Note that `ENV DEBUG=true` should be URI component encoded. - - Supported `Dockerfile` instructions: - `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - type: "array" - items: - type: "string" - - name: "platform" - in: "query" - description: | - Platform in the format os[/arch[/variant]]. - - When used in combination with the `fromImage` option, the daemon checks - if the given image is present in the local image cache with the given - OS and Architecture, and otherwise attempts to pull the image. If the - option is not set, the host's native OS and Architecture are used. - If the given image does not exist in the local image cache, the daemon - attempts to pull the image with the host's native OS and Architecture. - If the given image does exists in the local image cache, but its OS or - architecture does not match, a warning is produced. - - When used with the `fromSrc` option to import an image from an archive, - this option sets the platform information for the imported image. If - the option is not set, the host's native OS and Architecture are used - for the imported image. - type: "string" - default: "" - tags: ["Image"] - /images/{name}/json: - get: - summary: "Inspect an image" - description: "Return low-level information about an image." - operationId: "ImageInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/ImageInspect" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - - name: "manifests" - in: "query" - description: "Include Manifests in the image summary." - type: "boolean" - default: false - required: false - tags: ["Image"] - /images/{name}/history: - get: - summary: "Get the history of an image" - description: "Return parent layers of an image." 
- operationId: "ImageHistory" - produces: ["application/json"] - responses: - 200: - description: "List of image layers" - schema: - type: "array" - items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false - examples: - application/json: - - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" - Created: 1398108230 - CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" - Tags: - - "ubuntu:lucid" - - "ubuntu:10.04" - Size: 182964289 - Comment: "" - - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" - Created: 1398108222 - CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" - Tags: [] - Size: 0 - Comment: "" - - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" - Created: 1371157430 - CreatedBy: "" - Tags: - - "scratch12:latest" - - "scratch:latest" - Size: 0 - Comment: "Imported from -" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "platform" - type: "string" - in: "query" - description: | - JSON-encoded OCI platform to select the platform-variant. - If omitted, it defaults to any locally available platform, - prioritizing the daemon's host platform. - - If the daemon provides a multi-platform image store, this selects - the platform-variant to show the history for. If the image is - a single-platform image, or if the multi-platform image does not - provide a variant matching the given platform, an error is returned. - - Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - tags: ["Image"] - /images/{name}/push: - post: - summary: "Push an image" - description: | - Push an image to a registry. - - If you wish to push an image on to a private registry, that image must - already have a tag which references the registry. For example, - `registry.example.com/myimage:latest`. - - The push is cancelled if the HTTP connection is closed. - operationId: "ImagePush" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - Name of the image to push. For example, `registry.example.com/myimage`. - The image must be present in the local image store with the same name. - - The name should be provided without tag; if a tag is provided, it - is ignored. For example, `registry.example.com/myimage:latest` is - considered equivalent to `registry.example.com/myimage`. - - Use the `tag` parameter to specify the tag to push. 
- type: "string" - required: true - - name: "tag" - in: "query" - description: | - Tag of the image to push. For example, `latest`. If no tag is provided, - all tags of the given image that are present in the local image store - are pushed. - type: "string" - - name: "platform" - type: "string" - in: "query" - description: | - JSON-encoded OCI platform to select the platform-variant to push. - If not provided, all available variants will attempt to be pushed. - - If the daemon provides a multi-platform image store, this selects - the platform-variant to push to the registry. If the image is - a single-platform image, or if the multi-platform image does not - provide a variant matching the given platform, an error is returned. - - Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - required: true - tags: ["Image"] - /images/{name}/tag: - post: - summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." - operationId: "ImageTag" - responses: - 201: - description: "No error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID to tag." - type: "string" - required: true - - name: "repo" - in: "query" - description: "The repository to tag in. For example, `someuser/someimage`." - type: "string" - - name: "tag" - in: "query" - description: "The name of the new tag." - type: "string" - tags: ["Image"] - /images/{name}: - delete: - summary: "Remove an image" - description: | - Remove an image, along with any untagged parent images that were - referenced by that image. - - Images can't be removed if they have descendant images, are being - used by a running container or are being used by a build. - operationId: "ImageDelete" - produces: ["application/json"] - responses: - 200: - description: "The image was deleted successfully" - schema: - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - examples: - application/json: - - Untagged: "3e2f21a89f" - - Deleted: "3e2f21a89f" - - Deleted: "53b4f83ac9" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "force" - in: "query" - description: "Remove the image even if it is being used by stopped containers or has other tags" - type: "boolean" - default: false - - name: "noprune" - in: "query" - description: "Do not delete untagged parent images" - type: "boolean" - default: false - - name: "platforms" - in: "query" - description: | - Select platform-specific content to delete. - Multiple values are accepted. - Each platform is a OCI platform encoded as a JSON string. 
- type: "array" - items: - # This should be OCIPlatform - # but $ref is not supported for array in query in Swagger 2.0 - # $ref: "#/definitions/OCIPlatform" - type: "string" - tags: ["Image"] - /images/search: - get: - summary: "Search images" - description: "Search for an image on Docker Hub." - operationId: "ImageSearch" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - type: "object" - title: "ImageSearchResponseItem" - properties: - description: - type: "string" - is_official: - type: "boolean" - is_automated: - description: | - Whether this repository has automated builds enabled. - -
- - > **Deprecated**: This field is deprecated and will always be "false". - type: "boolean" - example: false - name: - type: "string" - star_count: - type: "integer" - examples: - application/json: - - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" - is_official: true - is_automated: false - name: "alpine" - star_count: 10093 - - description: "Busybox base image." - is_official: true - is_automated: false - name: "Busybox base image." - star_count: 3037 - - description: "The PostgreSQL object-relational database system provides reliability and data integrity." - is_official: true - is_automated: false - name: "postgres" - star_count: 12408 - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "term" - in: "query" - description: "Term to search" - type: "string" - required: true - - name: "limit" - in: "query" - description: "Maximum number of results to return" - type: "integer" - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `is-official=(true|false)` - - `stars=` Matches images that has at least 'number' stars. - type: "string" - tags: ["Image"] - /images/prune: - post: - summary: "Delete unused images" - produces: - - "application/json" - operationId: "ImagePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - - - `dangling=` When set to `true` (or `1`), prune only - unused *and* untagged images. When set to `false` - (or `0`), all unused images are pruned. - - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ImagePruneResponse" - properties: - ImagesDeleted: - description: "Images that were deleted" - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /auth: - post: - summary: "Check auth configuration" - description: | - Validate credentials for a registry and, if available, get an identity - token for accessing the registry without password. - operationId: "SystemAuth" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "An identity token was generated successfully." - schema: - type: "object" - title: "SystemAuthResponse" - required: [Status] - properties: - Status: - description: "The status of the authentication" - type: "string" - x-nullable: false - IdentityToken: - description: "An opaque token used to authenticate a user after a successful login" - type: "string" - x-nullable: false - examples: - application/json: - Status: "Login Succeeded" - IdentityToken: "9cbaf023786cd7..." 
- 204: - description: "No error" - 401: - description: "Auth error" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "authConfig" - in: "body" - description: "Authentication to check" - schema: - $ref: "#/definitions/AuthConfig" - tags: ["System"] - /info: - get: - summary: "Get system information" - operationId: "SystemInfo" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/SystemInfo" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /version: - get: - summary: "Get version" - description: "Returns the version of Docker that is running and various information about the system that Docker is running on." - operationId: "SystemVersion" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/SystemVersion" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /_ping: - get: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPing" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "OK" - headers: - Api-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: | - Default version of docker image builder - - The default on Linux is version "2" (BuildKit), but the daemon - can be configured to recommend version "1" (classic Builder). - Windows does not yet support BuildKit for native Windows images, - and uses "1" (classic builder) as a default. - - This value is a recommendation as advertised by the daemon, and - it is up to the client to choose which builder to use. - default: "2" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. - default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - headers: - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - tags: ["System"] - head: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPingHead" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "(empty)" - headers: - Api-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: "Default version of docker image builder" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. 
- default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /commit: - post: - summary: "Create a new image from a container" - operationId: "ImageCommit" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IDResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "containerConfig" - in: "body" - description: "The container configuration" - schema: - $ref: "#/definitions/ContainerConfig" - - name: "container" - in: "query" - description: "The ID or name of the container to commit" - type: "string" - - name: "repo" - in: "query" - description: "Repository name for the created image" - type: "string" - - name: "tag" - in: "query" - description: "Tag name for the create image" - type: "string" - - name: "comment" - in: "query" - description: "Commit message" - type: "string" - - name: "author" - in: "query" - description: "Author of the image (e.g., `John Hannibal Smith `)" - type: "string" - - name: "pause" - in: "query" - description: "Whether to pause the container before committing" - type: "boolean" - default: true - - name: "changes" - in: "query" - description: "`Dockerfile` instructions to apply while committing" - type: "string" - tags: ["Image"] - /events: - get: - summary: "Monitor events" - description: | - Stream real-time events from the server. - - Various objects within Docker report events when something happens to them. - - Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - - Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` - - Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` - - Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` - - The Docker daemon reports these events: `reload` - - Services report these events: `create`, `update`, and `remove` - - Nodes report these events: `create`, `update`, and `remove` - - Secrets report these events: `create`, `update`, and `remove` - - Configs report these events: `create`, `update`, and `remove` - - The Builder reports `prune` events - - operationId: "SystemEvents" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/EventMessage" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "since" - in: "query" - description: "Show events created since this timestamp then stream new events." - type: "string" - - name: "until" - in: "query" - description: "Show events created until this timestamp then stop streaming." 
- type: "string" - - name: "filters" - in: "query" - description: | - A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - - - `config=` config name or ID - - `container=` container name or ID - - `daemon=` daemon name or ID - - `event=` event type - - `image=` image name or ID - - `label=` image or container label - - `network=` network name or ID - - `node=` node ID - - `plugin`= plugin name or ID - - `scope`= local or swarm - - `secret=` secret name or ID - - `service=` service name or ID - - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - - `volume=` volume name - type: "string" - tags: ["System"] - /system/df: - get: - summary: "Get data usage information" - operationId: "SystemDataUsage" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemDataUsageResponse" - properties: - LayersSize: - type: "integer" - format: "int64" - Images: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - Containers: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - Volumes: - type: "array" - items: - $ref: "#/definitions/Volume" - BuildCache: - type: "array" - items: - $ref: "#/definitions/BuildCache" - example: - LayersSize: 1092588 - Images: - - - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - ParentId: "" - RepoTags: - - "busybox:latest" - RepoDigests: - - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" - Created: 1466724217 - Size: 1092588 - SharedSize: 0 - Labels: {} - Containers: 1 - Containers: - - - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" - Names: - - "/top" - Image: "busybox" - ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - Command: "top" - Created: 1472592424 - Ports: [] - SizeRootFs: 1092588 - Labels: {} - State: "exited" - Status: "Exited (0) 56 minutes ago" - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - IPAMConfig: null - Links: null - Aliases: null - NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" - EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" - Gateway: "172.18.0.1" - IPAddress: "172.18.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Mounts: [] - Volumes: - - - Name: "my-volume" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/my-volume/_data" - Labels: null - Scope: "local" - Options: null - UsageData: - Size: 10920104 - RefCount: 2 - BuildCache: - - - ID: "hw53o5aio51xtltp5xjp8v7fx" - Parents: [] - Type: "regular" - Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" - InUse: false - Shared: true - Size: 0 - CreatedAt: "2021-06-28T13:31:01.474619385Z" - LastUsedAt: "2021-07-07T22:02:32.738075951Z" - UsageCount: 26 - - - ID: "ndlpt0hhvkqcdfkputsk4cq9c" - Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] - Type: "regular" - Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: false - Shared: true - Size: 51 - CreatedAt: "2021-06-28T13:31:03.002625487Z" - LastUsedAt: "2021-07-07T22:02:32.773909517Z" - UsageCount: 26 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - 
- name: "type" - in: "query" - description: | - Object types, for which to compute and return data. - type: "array" - collectionFormat: multi - items: - type: "string" - enum: ["container", "image", "volume", "build-cache"] - tags: ["System"] - /images/{name}/get: - get: - summary: "Export an image" - description: | - Get a tarball containing all images and metadata for a repository. - - If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. - - ### Image tarball format - - An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). - - Additionally, includes the manifest.json file associated with a backwards compatible docker save format. - - If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. - - ```json - { - "hello-world": { - "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" - } - } - ``` - operationId: "ImageGet" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "platform" - type: "string" - in: "query" - description: | - JSON encoded OCI platform describing a platform which will be used - to select a platform-specific image to be saved if the image is - multi-platform. - If not provided, the full multi-platform image will be saved. - - Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - tags: ["Image"] - /images/get: - get: - summary: "Export several images" - description: | - Get a tarball containing all images and metadata for several image - repositories. - - For each value of the `names` parameter: if it is a specific name and - tag (e.g. `ubuntu:latest`), then only that image (and its parents) are - returned; if it is an image ID, similarly only that image (and its parents) - are returned and there would be no names referenced in the 'repositories' - file for this image ID. - - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageGetAll" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "names" - in: "query" - description: "Image names to filter by" - type: "array" - items: - type: "string" - - name: "platform" - type: "string" - in: "query" - description: | - JSON encoded OCI platform describing a platform which will be used - to select a platform-specific image to be saved if the image is - multi-platform. - If not provided, the full multi-platform image will be saved. - - Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - tags: ["Image"] - /images/load: - post: - summary: "Import images" - description: | - Load a set of images and tags into a repository. 
- - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageLoad" - consumes: - - "application/x-tar" - produces: - - "application/json" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "imagesTarball" - in: "body" - description: "Tar archive containing images" - schema: - type: "string" - format: "binary" - - name: "quiet" - in: "query" - description: "Suppress progress details during load." - type: "boolean" - default: false - - name: "platform" - type: "string" - in: "query" - description: | - JSON encoded OCI platform describing a platform which will be used - to select a platform-specific image to be load if the image is - multi-platform. - If not provided, the full multi-platform image will be loaded. - - Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - tags: ["Image"] - /containers/{id}/exec: - post: - summary: "Create an exec instance" - description: "Run a command inside a running container." - operationId: "ContainerExec" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IDResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is paused" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execConfig" - in: "body" - description: "Exec configuration" - schema: - type: "object" - title: "ExecConfig" - properties: - AttachStdin: - type: "boolean" - description: "Attach to `stdin` of the exec command." - AttachStdout: - type: "boolean" - description: "Attach to `stdout` of the exec command." - AttachStderr: - type: "boolean" - description: "Attach to `stderr` of the exec command." - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - example: [80, 64] - DetachKeys: - type: "string" - description: | - Override the key sequence for detaching a container. Format is - a single character `[a-Z]` or `ctrl-` where `` - is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - Env: - description: | - A list of environment variables in the form `["VAR=value", ...]`. - type: "array" - items: - type: "string" - Cmd: - type: "array" - description: "Command to run, as a string or array of strings." - items: - type: "string" - Privileged: - type: "boolean" - description: "Runs the exec process with extended privileges." - default: false - User: - type: "string" - description: | - The user, and optionally, group to run the exec process inside - the container. Format is one of: `user`, `user:group`, `uid`, - or `uid:gid`. - WorkingDir: - type: "string" - description: | - The working directory for the exec process inside the container. 
- example: - AttachStdin: false - AttachStdout: true - AttachStderr: true - DetachKeys: "ctrl-p,ctrl-q" - Tty: false - Cmd: - - "date" - Env: - - "FOO=bar" - - "BAZ=quux" - required: true - - name: "id" - in: "path" - description: "ID or name of container" - type: "string" - required: true - tags: ["Exec"] - /exec/{id}/start: - post: - summary: "Start an exec instance" - description: | - Starts a previously set up exec instance. If detach is true, this endpoint - returns immediately after starting the command. Otherwise, it sets up an - interactive session with the command. - operationId: "ExecStart" - consumes: - - "application/json" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Container is stopped or paused" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execStartConfig" - in: "body" - schema: - type: "object" - title: "ExecStartConfig" - properties: - Detach: - type: "boolean" - description: "Detach from the command." - example: false - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - example: true - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - example: [80, 64] - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - /exec/{id}/resize: - post: - summary: "Resize an exec instance" - description: | - Resize the TTY session used by an exec instance. This endpoint only works - if `tty` was specified as part of creating and starting the exec instance. - operationId: "ExecResize" - responses: - 200: - description: "No error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - - name: "h" - in: "query" - required: true - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - required: true - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Exec"] - /exec/{id}/json: - get: - summary: "Inspect an exec instance" - description: "Return low-level information about an exec instance." - operationId: "ExecInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ExecInspectResponse" - properties: - CanRemove: - type: "boolean" - DetachKeys: - type: "string" - ID: - type: "string" - Running: - type: "boolean" - ExitCode: - type: "integer" - ProcessConfig: - $ref: "#/definitions/ProcessConfig" - OpenStdin: - type: "boolean" - OpenStderr: - type: "boolean" - OpenStdout: - type: "boolean" - ContainerID: - type: "string" - Pid: - type: "integer" - description: "The system process ID for the exec process." 
- examples: - application/json: - CanRemove: false - ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - DetachKeys: "" - ExitCode: 2 - ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" - OpenStderr: true - OpenStdin: true - OpenStdout: true - ProcessConfig: - arguments: - - "-c" - - "exit 2" - entrypoint: "sh" - privileged: false - tty: true - user: "1000" - Running: false - Pid: 42000 - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - - /volumes: - get: - summary: "List volumes" - operationId: "VolumeList" - produces: ["application/json"] - responses: - 200: - description: "Summary volume data that matches the query" - schema: - $ref: "#/definitions/VolumeListResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to - process on the volumes list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - volumes that are not in use by a container. When set to `false` - (or `0`), only volumes that are in use by one or more - containers are returned. - - `driver=` Matches volumes based on their driver. - - `label=` or `label=:` Matches volumes based on - the presence of a `label` alone or a `label` and a value. - - `name=` Matches all or part of a volume name. - type: "string" - format: "json" - tags: ["Volume"] - - /volumes/create: - post: - summary: "Create a volume" - operationId: "VolumeCreate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 201: - description: "The volume was created successfully" - schema: - $ref: "#/definitions/Volume" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "volumeConfig" - in: "body" - required: true - description: "Volume configuration" - schema: - $ref: "#/definitions/VolumeCreateOptions" - tags: ["Volume"] - - /volumes/{name}: - get: - summary: "Inspect a volume" - operationId: "VolumeInspect" - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Volume" - 404: - description: "No such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - tags: ["Volume"] - - put: - summary: | - "Update a volume. 
Valid only for Swarm cluster volumes" - operationId: "VolumeUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name or ID of the volume" - type: "string" - required: true - - name: "body" - in: "body" - schema: - # though the schema for is an object that contains only a - # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object - # means that if, later on, we support things like changing the - # labels, we can do so without duplicating that information to the - # ClusterVolumeSpec. - type: "object" - description: "Volume configuration" - properties: - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - description: | - The spec of the volume to update. Currently, only Availability may - change. All other fields must remain unchanged. - - name: "version" - in: "query" - description: | - The version number of the volume being updated. This is required to - avoid conflicting writes. Found in the volume's `ClusterVolume` - field. - type: "integer" - format: "int64" - required: true - tags: ["Volume"] - - delete: - summary: "Remove a volume" - description: "Instruct the driver to remove the volume." - operationId: "VolumeDelete" - responses: - 204: - description: "The volume was removed" - 404: - description: "No such volume or volume driver" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Volume is in use and cannot be removed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - - name: "force" - in: "query" - description: "Force the removal of the volume" - type: "boolean" - default: false - tags: ["Volume"] - - /volumes/prune: - post: - summary: "Delete unused volumes" - produces: - - "application/json" - operationId: "VolumePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. - - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "VolumePruneResponse" - properties: - VolumesDeleted: - description: "Volumes that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Volume"] - /networks: - get: - summary: "List networks" - description: | - Returns a list of networks. For details on the format, see the - [network inspect endpoint](#operation/NetworkInspect). - - Note that it uses a different, smaller representation of a network than - inspecting a single network. 
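The `VolumePrune` excerpt above shows the `filters` query parameter carrying a JSON-encoded `map[string][]string` and a `VolumePruneResponse` with `VolumesDeleted` and `SpaceReclaimed`. A small sketch of that call, reusing the Unix-socket client from the exec example; the `com.example.temp` label is purely hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// pruneLabeledVolumes calls POST /volumes/prune, restricting the prune to
// volumes carrying a (hypothetical) label via the JSON-encoded filters
// parameter described above, and reports what was reclaimed.
func pruneLabeledVolumes(cli *http.Client) error {
	filters, _ := json.Marshal(map[string][]string{
		"label": {"com.example.temp"}, // hypothetical label filter
	})
	u := "http://docker/volumes/prune?filters=" + url.QueryEscape(string(filters))
	resp, err := cli.Post(u, "application/json", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var out struct {
		VolumesDeleted []string
		SpaceReclaimed int64
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return err
	}
	fmt.Printf("deleted %d volumes, reclaimed %d bytes\n", len(out.VolumesDeleted), out.SpaceReclaimed)
	return nil
}
```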
For example, the list of containers attached - to the network is not propagated in API versions 1.28 and up. - operationId: "NetworkList" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Network" - examples: - application/json: - - Name: "bridge" - Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" - Created: "2016-10-19T06:21:00.416543526Z" - Scope: "local" - Driver: "bridge" - EnableIPv4: true - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: - - - Subnet: "172.17.0.0/16" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - - Name: "none" - Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "null" - EnableIPv4: false - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - - Name: "host" - Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "host" - EnableIPv4: false - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to process - on the networks list. - - Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - networks that are not in use by a container. When set to `false` - (or `0`), only networks that are in use by one or more - containers are returned. - - `driver=` Matches a network's driver. - - `id=` Matches all or part of a network ID. - - `label=` or `label==` of a network label. - - `name=` Matches all or part of a network name. - - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
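To make the `filters` encoding for `NetworkList` concrete, a short sketch that asks for user-defined networks only (`type=custom`, as listed above) and prints a few summary fields. Client helper and URL conventions are the same assumptions as in the earlier sketches.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// listCustomNetworks lists user-defined networks via GET /networks with the
// `type=custom` filter described above, printing name, ID and driver.
func listCustomNetworks(cli *http.Client) error {
	filters, _ := json.Marshal(map[string][]string{"type": {"custom"}})
	resp, err := cli.Get("http://docker/networks?filters=" + url.QueryEscape(string(filters)))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var nets []struct {
		Name   string
		Id     string
		Driver string
	}
	if err := json.NewDecoder(resp.Body).Decode(&nets); err != nil {
		return err
	}
	for _, n := range nets {
		fmt.Printf("%s\t%.12s\t%s\n", n.Name, n.Id, n.Driver)
	}
	return nil
}
```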
- type: "string" - tags: ["Network"] - - /networks/{id}: - get: - summary: "Inspect a network" - operationId: "NetworkInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Network" - 404: - description: "Network not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "verbose" - in: "query" - description: "Detailed inspect output for troubleshooting" - type: "boolean" - default: false - - name: "scope" - in: "query" - description: "Filter the network by scope (swarm, global, or local)" - type: "string" - tags: ["Network"] - - delete: - summary: "Remove a network" - operationId: "NetworkDelete" - responses: - 204: - description: "No error" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such network" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - tags: ["Network"] - - /networks/create: - post: - summary: "Create a network" - operationId: "NetworkCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "Network created successfully" - schema: - $ref: "#/definitions/NetworkCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: | - Forbidden operation. This happens when trying to create a network named after a pre-defined network, - or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "plugin not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "networkConfig" - in: "body" - description: "Network configuration" - required: true - schema: - type: "object" - title: "NetworkCreateRequest" - required: ["Name"] - properties: - Name: - description: "The network's name." - type: "string" - example: "my_network" - Driver: - description: "Name of the network driver plugin to use." - type: "string" - default: "bridge" - example: "bridge" - Scope: - description: | - The level at which the network exists (e.g. `swarm` for cluster-wide - or `local` for machine level). - type: "string" - Internal: - description: "Restrict external access to the network." - type: "boolean" - Attachable: - description: | - Globally scoped network is manually attachable by regular - containers from workers in swarm mode. - type: "boolean" - example: true - Ingress: - description: | - Ingress network is the network which provides the routing-mesh - in swarm mode. - type: "boolean" - example: false - ConfigOnly: - description: | - Creates a config-only network. Config-only networks are placeholder - networks for network configurations to be used by other networks. - Config-only networks cannot be used directly to run containers - or services. - type: "boolean" - default: false - example: false - ConfigFrom: - description: | - Specifies the source which will provide the configuration for - this network. 
The specified network must be an existing - config-only network; see ConfigOnly. - $ref: "#/definitions/ConfigReference" - IPAM: - description: "Optional custom IP scheme for the network." - $ref: "#/definitions/IPAM" - EnableIPv4: - description: "Enable IPv4 on the network." - type: "boolean" - example: true - EnableIPv6: - description: "Enable IPv6 on the network." - type: "boolean" - example: true - Options: - description: "Network specific options to be used by the drivers." - type: "object" - additionalProperties: - type: "string" - example: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - tags: ["Network"] - - /networks/{id}/connect: - post: - summary: "Connect a container to a network" - description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" - operationId: "NetworkConnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "Operation forbidden" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkConnectRequest" - properties: - Container: - type: "string" - description: "The ID or name of the container to connect to the network." - EndpointConfig: - $ref: "#/definitions/EndpointSettings" - example: - Container: "3613f73ba0e4" - EndpointConfig: - IPAMConfig: - IPv4Address: "172.24.56.89" - IPv6Address: "2001:db8::5689" - MacAddress: "02:42:ac:12:05:02" - Priority: 100 - tags: ["Network"] - - /networks/{id}/disconnect: - post: - summary: "Disconnect a container from a network" - operationId: "NetworkDisconnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkDisconnectRequest" - properties: - Container: - type: "string" - description: | - The ID or name of the container to disconnect from the network. - Force: - type: "boolean" - description: | - Force the container to disconnect from the network. 
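The `NetworkConnect` request example above (a container reference plus an `EndpointConfig` with an `IPAMConfig`) translates into a small helper like the following; the addresses passed in are placeholders, and the network must be local-scoped or an attachable swarm-scoped network, as the description notes.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// connectContainer attaches a container to a network with a static IPv4
// address via POST /networks/{id}/connect (addresses here are placeholders).
func connectContainer(cli *http.Client, networkID, containerID, ipv4 string) error {
	body, _ := json.Marshal(map[string]any{
		"Container": containerID,
		"EndpointConfig": map[string]any{
			"IPAMConfig": map[string]string{"IPv4Address": ipv4},
		},
	})
	resp, err := cli.Post(
		"http://docker/networks/"+networkID+"/connect",
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("network connect: unexpected status %s", resp.Status)
	}
	return nil
}
```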
- tags: ["Network"] - /networks/prune: - post: - summary: "Delete unused networks" - produces: - - "application/json" - operationId: "NetworkPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "NetworkPruneResponse" - properties: - NetworksDeleted: - description: "Networks that were deleted" - type: "array" - items: - type: "string" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Network"] - /plugins: - get: - summary: "List plugins" - operationId: "PluginList" - description: "Returns information about installed plugins." - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Plugin" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the plugin list. - - Available filters: - - - `capability=` - - `enable=|` - tags: ["Plugin"] - - /plugins/privileges: - get: - summary: "Get plugin privileges" - operationId: "GetPluginPrivileges" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: - - "Plugin" - - /plugins/pull: - post: - summary: "Install a plugin" - operationId: "PluginPull" - description: | - Pulls and installs a plugin. After the plugin is installed, it can be - enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). - produces: - - "application/json" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - Remote reference for plugin to install. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "name" - in: "query" - description: | - Local name for the pulled plugin. - - The `:latest` tag is optional, and is used as the default if omitted. - required: false - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. 
- type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/{name}/json: - get: - summary: "Inspect a plugin" - operationId: "PluginInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}: - delete: - summary: "Remove a plugin" - operationId: "PluginDelete" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "force" - in: "query" - description: | - Disable the plugin before removing. This may result in issues if the - plugin is in use by a container. - type: "boolean" - default: false - tags: ["Plugin"] - /plugins/{name}/enable: - post: - summary: "Enable a plugin" - operationId: "PluginEnable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "timeout" - in: "query" - description: "Set the HTTP client timeout (in seconds)" - type: "integer" - default: 0 - tags: ["Plugin"] - /plugins/{name}/disable: - post: - summary: "Disable a plugin" - operationId: "PluginDisable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "force" - in: "query" - description: | - Force disable a plugin even if still in use. - required: false - type: "boolean" - tags: ["Plugin"] - /plugins/{name}/upgrade: - post: - summary: "Upgrade a plugin" - operationId: "PluginUpgrade" - responses: - 204: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "remote" - in: "query" - description: | - Remote reference to upgrade to. 
- - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/create: - post: - summary: "Create a plugin" - operationId: "PluginCreate" - consumes: - - "application/x-tar" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "tarContext" - in: "body" - description: "Path to tar containing plugin rootfs and manifest" - schema: - type: "string" - format: "binary" - tags: ["Plugin"] - /plugins/{name}/push: - post: - summary: "Push a plugin" - operationId: "PluginPush" - description: | - Push a plugin to the registry. - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - responses: - 200: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /plugins/{name}/set: - post: - summary: "Configure a plugin" - operationId: "PluginSet" - consumes: - - "application/json" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - type: "string" - example: ["DEBUG=1"] - responses: - 204: - description: "No error" - 404: - description: "Plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /nodes: - get: - summary: "List nodes" - operationId: "NodeList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Node" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
- - Available filters: - - `id=` - - `label=` - - `membership=`(`accepted`|`pending`)` - - `name=` - - `node.label=` - - `role=`(`manager`|`worker`)` - type: "string" - tags: ["Node"] - /nodes/{id}: - get: - summary: "Inspect a node" - operationId: "NodeInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Node" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - tags: ["Node"] - delete: - summary: "Delete a node" - operationId: "NodeDelete" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - - name: "force" - in: "query" - description: "Force remove a node from the swarm" - default: false - type: "boolean" - tags: ["Node"] - /nodes/{id}/update: - post: - summary: "Update a node" - operationId: "NodeUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID of the node" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/NodeSpec" - - name: "version" - in: "query" - description: | - The version number of the node object being updated. This is required - to avoid conflicting writes. 
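A sketch of the read-modify-write cycle that `NodeUpdate` implies: inspect the node, take `Version.Index`, and post the spec back with the `version` query parameter to avoid conflicting writes. Setting `Availability` to `drain` assumes the usual `NodeSpec` field name, which is not spelled out in the excerpt above.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// drainNode inspects a node to learn its current spec and version, then
// posts the spec back with Availability set to "drain".
func drainNode(cli *http.Client, nodeID string) error {
	resp, err := cli.Get("http://docker/nodes/" + nodeID)
	if err != nil {
		return err
	}
	var node struct {
		Version struct{ Index uint64 }
		Spec    map[string]any
	}
	err = json.NewDecoder(resp.Body).Decode(&node)
	resp.Body.Close()
	if err != nil {
		return err
	}

	node.Spec["Availability"] = "drain" // assumed NodeSpec field name
	body, _ := json.Marshal(node.Spec)
	u := fmt.Sprintf("http://docker/nodes/%s/update?version=%d", nodeID, node.Version.Index)
	resp, err = cli.Post(u, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("node update: unexpected status %s", resp.Status)
	}
	return nil
}
```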
- type: "integer" - format: "int64" - required: true - tags: ["Node"] - /swarm: - get: - summary: "Inspect swarm" - operationId: "SwarmInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Swarm" - 404: - description: "no such swarm" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/init: - post: - summary: "Initialize a new swarm" - operationId: "SwarmInit" - produces: - - "application/json" - - "text/plain" - responses: - 200: - description: "no error" - schema: - description: "The node ID" - type: "string" - example: "7v2t30z9blmxuhnyo6s4cpenp" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmInitRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication, as well - as determining the networking interface used for the VXLAN - Tunnel Endpoint (VTEP). This can either be an address/port - combination in the form `192.168.1.1:4567`, or an interface - followed by a port number, like `eth0:4567`. If the port number - is omitted, the default swarm listening port is used. - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - type: "string" - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - if no port is set or is set to 0, default port 4789 will be used. - type: "integer" - format: "uint32" - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global - scope networks. - type: "array" - items: - type: "string" - example: ["10.10.0.0/16", "20.20.0.0/16"] - ForceNewCluster: - description: "Force creation of a new swarm." - type: "boolean" - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created - from the default subnet pool. 
- type: "integer" - format: "uint32" - Spec: - $ref: "#/definitions/SwarmSpec" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - DataPathPort: 4789 - DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] - SubnetSize: 24 - ForceNewCluster: false - Spec: - Orchestration: {} - Raft: {} - Dispatcher: {} - CAConfig: {} - EncryptionConfig: - AutoLockManagers: false - tags: ["Swarm"] - /swarm/join: - post: - summary: "Join an existing swarm" - operationId: "SwarmJoin" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmJoinRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication if the node - gets promoted to manager, as well as determining the networking - interface used for the VXLAN Tunnel Endpoint (VTEP). - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - - type: "string" - RemoteAddrs: - description: | - Addresses of manager nodes already participating in the swarm. - type: "array" - items: - type: "string" - JoinToken: - description: "Secret token for joining this swarm." - type: "string" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - DataPathAddr: "192.168.1.1" - RemoteAddrs: - - "node1:2377" - JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - tags: ["Swarm"] - /swarm/leave: - post: - summary: "Leave a swarm" - operationId: "SwarmLeave" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "force" - description: | - Force leave swarm, even if this is the last manager or that it will - break the cluster. 
- in: "query" - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/update: - post: - summary: "Update a swarm" - operationId: "SwarmUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - $ref: "#/definitions/SwarmSpec" - - name: "version" - in: "query" - description: | - The version number of the swarm object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - - name: "rotateWorkerToken" - in: "query" - description: "Rotate the worker join token." - type: "boolean" - default: false - - name: "rotateManagerToken" - in: "query" - description: "Rotate the manager join token." - type: "boolean" - default: false - - name: "rotateManagerUnlockKey" - in: "query" - description: "Rotate the manager unlock key." - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/unlockkey: - get: - summary: "Get the unlock key" - operationId: "SwarmUnlockkey" - consumes: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "UnlockKeyResponse" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/unlock: - post: - summary: "Unlock a locked manager" - operationId: "SwarmUnlock" - consumes: - - "application/json" - produces: - - "application/json" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmUnlockRequest" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /services: - get: - summary: "List services" - operationId: "ServiceList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Service" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the services list. - - Available filters: - - - `id=` - - `label=` - - `mode=["replicated"|"global"]` - - `name=` - - name: "status" - in: "query" - type: "boolean" - description: | - Include service status, with count of running and desired tasks. 
- tags: ["Service"] - /services/create: - post: - summary: "Create a service" - operationId: "ServiceCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/ServiceCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "network is not eligible for services" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "name conflicts with an existing service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "web" - TaskTemplate: - ContainerSpec: - Image: "nginx:alpine" - Mounts: - - - ReadOnly: true - Source: "web-data" - Target: "/usr/share/nginx/html" - Type: "volume" - VolumeOptions: - DriverConfig: {} - Labels: - com.example.something: "something-value" - Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] - User: "33" - DNSConfig: - Nameservers: ["8.8.8.8"] - Search: ["example.org"] - Options: ["timeout:3"] - Secrets: - - - File: - Name: "www.example.org.key" - UID: "33" - GID: "33" - Mode: 384 - SecretID: "fpjqlhnwb19zds35k8wn80lq9" - SecretName: "example_org_domain_key" - OomScoreAdj: 0 - LogDriver: - Name: "json-file" - Options: - max-file: "3" - max-size: "10M" - Placement: {} - Resources: - Limits: - MemoryBytes: 104857600 - Reservations: {} - RestartPolicy: - Condition: "on-failure" - Delay: 10000000000 - MaxAttempts: 10 - Mode: - Replicated: - Replicas: 4 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Ports: - - - Protocol: "tcp" - PublishedPort: 8080 - TargetPort: 80 - Labels: - foo: "bar" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - tags: ["Service"] - /services/{id}: - get: - summary: "Inspect a service" - operationId: "ServiceInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Service" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "insertDefaults" - in: "query" - description: "Fill empty fields with default values." 
- type: "boolean" - default: false - tags: ["Service"] - delete: - summary: "Delete a service" - operationId: "ServiceDelete" - responses: - 200: - description: "no error" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - tags: ["Service"] - /services/{id}/update: - post: - summary: "Update a service" - operationId: "ServiceUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ServiceUpdateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "top" - TaskTemplate: - ContainerSpec: - Image: "busybox" - Args: - - "top" - OomScoreAdj: 0 - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - - - name: "version" - in: "query" - description: | - The version number of the service object being updated. This is - required to avoid conflicting writes. - This version number should be the value as currently set on the - service *before* the update. You can find the current version by - calling `GET /services/{id}` - required: true - type: "integer" - - name: "registryAuthFrom" - in: "query" - description: | - If the `X-Registry-Auth` header is not specified, this parameter - indicates where to find registry authorization credentials. - type: "string" - enum: ["spec", "previous-spec"] - default: "spec" - - name: "rollback" - in: "query" - description: | - Set to this parameter to `previous` to cause a server-side rollback - to the previous service spec. The supplied spec will be ignored in - this case. - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - tags: ["Service"] - /services/{id}/logs: - get: - summary: "Get service logs" - description: | - Get `stdout` and `stderr` logs from a service. See also - [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. 
- produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ServiceLogs" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such service: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the service" - type: "string" - - name: "details" - in: "query" - description: "Show service context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Service"] - /tasks: - get: - summary: "List tasks" - operationId: "TaskList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Task" - example: - - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - - ID: "1yljwbmlr8er2waf8orvqpwms" - Version: - Index: 30 - CreatedAt: "2016-06-07T21:07:30.019104782Z" - UpdatedAt: "2016-06-07T21:07:30.231958098Z" - Name: "hopeful_cori" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: 
"60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:30.202183143Z" - State: "shutdown" - Message: "shutdown" - ContainerStatus: - ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" - DesiredState: "shutdown" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.5/16" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the tasks list. - - Available filters: - - - `desired-state=(running | shutdown | accepted)` - - `id=` - - `label=key` or `label="key=value"` - - `name=` - - `node=` - - `service=` - tags: ["Task"] - /tasks/{id}: - get: - summary: "Inspect a task" - operationId: "TaskInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Task" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID of the task" - required: true - type: "string" - tags: ["Task"] - /tasks/{id}/logs: - get: - summary: "Get task logs" - description: | - Get `stdout` and `stderr` logs from a task. - See also [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. - operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such task: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID of the task" - type: "string" - - name: "details" - in: "query" - description: "Show task context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Task"] - /secrets: - get: - summary: "List secrets" - operationId: "SecretList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Secret" - example: - - ID: "blt1owaxmitz71s9v5zh81zun" - Version: - Index: 85 - CreatedAt: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: "2017-07-20T13:55:28.678958722Z" - Spec: - Name: "mysql-passwd" - Labels: - some.label: "some.value" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the secrets list. 
- - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Secret"] - /secrets/create: - post: - summary: "Create a secret" - operationId: "SecretCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IDResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/SecretSpec" - - type: "object" - example: - Name: "app-key.crt" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - tags: ["Secret"] - /secrets/{id}: - get: - summary: "Inspect a secret" - operationId: "SecretInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Secret" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - delete: - summary: "Delete a secret" - operationId: "SecretDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - /secrets/{id}/update: - post: - summary: "Update a Secret" - operationId: "SecretUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such secret" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the secret" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/SecretSpec" - description: | - The spec of the secret to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [SecretInspect endpoint](#operation/SecretInspect) response values. 
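A `SecretCreate` sketch following the request example above: the payload goes into `Data` base64-encoded, the 201 response is an `IDResponse` with the new secret ID, and a 409 means the name is already taken.

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// createSecret stores payload as a swarm secret via POST /secrets/create.
// Data must be base64-encoded in the SecretSpec, as in the request example.
func createSecret(cli *http.Client, name string, payload []byte) (string, error) {
	spec := map[string]any{
		"Name": name,
		"Data": base64.StdEncoding.EncodeToString(payload),
	}
	body, _ := json.Marshal(spec)
	resp, err := cli.Post("http://docker/secrets/create", "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusCreated:
		var out struct{ ID string }
		if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
			return "", err
		}
		return out.ID, nil
	case http.StatusConflict:
		return "", fmt.Errorf("secret %q already exists", name)
	default:
		return "", fmt.Errorf("secret create: unexpected status %s", resp.Status)
	}
}
```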
- - name: "version" - in: "query" - description: | - The version number of the secret object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Secret"] - /configs: - get: - summary: "List configs" - operationId: "ConfigList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Config" - example: - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "server.conf" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the configs list. - - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Config"] - /configs/create: - post: - summary: "Create a config" - operationId: "ConfigCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IDResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/ConfigSpec" - - type: "object" - example: - Name: "server.conf" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - tags: ["Config"] - /configs/{id}: - get: - summary: "Inspect a config" - operationId: "ConfigInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Config" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - delete: - summary: "Delete a config" - operationId: "ConfigDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - /configs/{id}/update: - post: - summary: "Update a Config" - operationId: "ConfigUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such config" - schema: - 
$ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the config" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/ConfigSpec" - description: | - The spec of the config to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [ConfigInspect endpoint](#operation/ConfigInspect) response values. - - name: "version" - in: "query" - description: | - The version number of the config object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Config"] - /distribution/{name}/json: - get: - summary: "Get image information from the registry" - description: | - Return image digest and platform information by contacting the registry. - operationId: "DistributionInspect" - produces: - - "application/json" - responses: - 200: - description: "descriptor and platform information" - schema: - $ref: "#/definitions/DistributionInspect" - 401: - description: "Failed authentication or no image found" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Distribution"] - /session: - post: - summary: "Initialize interactive session" - description: | - Start a new interactive session with a server. Session allows server to - call back to the client for advanced capabilities. - - ### Hijacking - - This endpoint hijacks the HTTP connection to HTTP2 transport that allows - the client to expose gPRC services on that connection. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /session HTTP/1.1 - Upgrade: h2c - Connection: Upgrade - ``` - - The Docker daemon responds with a `101 UPGRADED` response follow with - the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Connection: Upgrade - Upgrade: h2c - ``` - operationId: "Session" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hijacking successful" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/build/disk_usage.go b/vendor/github.com/docker/docker/api/types/build/disk_usage.go deleted file mode 100644 index e969b6d61..000000000 --- a/vendor/github.com/docker/docker/api/types/build/disk_usage.go +++ /dev/null @@ -1,8 +0,0 @@ -package build - -// CacheDiskUsage contains disk usage for the build cache. -type CacheDiskUsage struct { - TotalSize int64 - Reclaimable int64 - Items []*CacheRecord -} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/options.go b/vendor/github.com/docker/docker/api/types/checkpoint/options.go deleted file mode 100644 index 9477458c2..000000000 --- a/vendor/github.com/docker/docker/api/types/checkpoint/options.go +++ /dev/null @@ -1,19 +0,0 @@ -package checkpoint - -// CreateOptions holds parameters to create a checkpoint from a container. 
-type CreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// ListOptions holds parameters to list checkpoints for a container. -type ListOptions struct { - CheckpointDir string -} - -// DeleteOptions holds parameters to delete a checkpoint from a container. -type DeleteOptions struct { - CheckpointID string - CheckpointDir string -} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index a3704edd3..000000000 --- a/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,85 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "context" - "net" -) - -// NewHijackedResponse initializes a [HijackedResponse] type. -func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { - return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - mediaType string - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. -// returns false if HTTP Content-Type is not relevant, and container must be inspected -func (h *HijackedResponse) MediaType() (string, bool) { - if h.mediaType == "" { - return "", false - } - return h.mediaType, true -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - - // PrivilegeFunc is a function that clients can supply to retry operations - // after getting an authorization error. This function returns the registry - // authentication header value in base64 encoded format, or an error if the - // privilege request fails. - // - // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. - PrivilegeFunc func(context.Context) (string, error) - AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error) - Args []string -} - -// PluginCreateOptions hold all options to plugin create. 
-type PluginCreateOptions struct { - RepoName string -} diff --git a/vendor/github.com/docker/docker/api/types/container/disk_usage.go b/vendor/github.com/docker/docker/api/types/container/disk_usage.go deleted file mode 100644 index 05b6cbe9c..000000000 --- a/vendor/github.com/docker/docker/api/types/container/disk_usage.go +++ /dev/null @@ -1,8 +0,0 @@ -package container - -// DiskUsage contains disk usage for containers. -type DiskUsage struct { - TotalSize int64 - Reclaimable int64 - Items []*Summary -} diff --git a/vendor/github.com/docker/docker/api/types/container/exec.go b/vendor/github.com/docker/docker/api/types/container/exec.go deleted file mode 100644 index f4b22376e..000000000 --- a/vendor/github.com/docker/docker/api/types/container/exec.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import "github.com/docker/docker/api/types/common" - -// ExecCreateResponse is the response for a successful exec-create request. -// It holds the ID of the exec that was created. -// -// TODO(thaJeztah): make this a distinct type. -type ExecCreateResponse = common.IDResponse - -// ExecOptions is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecOptions struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// ExecStartOptions is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartOptions struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool - // Terminal size [height, width], unused if Tty == false - ConsoleSize *[2]uint `json:",omitempty"` -} - -// ExecAttachOptions is a temp struct used by execAttach. -// -// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached. -type ExecAttachOptions = ExecStartOptions - -// ExecInspect holds information returned by exec inspect. -type ExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} diff --git a/vendor/github.com/docker/docker/api/types/container/network_settings.go b/vendor/github.com/docker/docker/api/types/container/network_settings.go deleted file mode 100644 index afec0e543..000000000 --- a/vendor/github.com/docker/docker/api/types/container/network_settings.go +++ /dev/null @@ -1,56 +0,0 @@ -package container - -import ( - "github.com/docker/docker/api/types/network" - "github.com/docker/go-connections/nat" -) - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds networking state for a container when inspecting it. -type NetworkSettingsBase struct { - Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. 
- SandboxID string // SandboxID uniquely represents a container's network stack - SandboxKey string // SandboxKey identifies the sandbox - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - - // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - // - // Deprecated: This field is never set and will be removed in a future release. - HairpinMode bool - // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6Address string - // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6PrefixLen int - SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. - SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// NetworkSettingsSummary provides a summary of container's networks -// in /containers/json -type NetworkSettingsSummary struct { - Networks map[string]*network.EndpointSettings -} diff --git a/vendor/github.com/docker/docker/api/types/container/options.go b/vendor/github.com/docker/docker/api/types/container/options.go deleted file mode 100644 index 7a2300576..000000000 --- a/vendor/github.com/docker/docker/api/types/container/options.go +++ /dev/null @@ -1,67 +0,0 @@ -package container - -import "github.com/docker/docker/api/types/filters" - -// ResizeOptions holds parameters to resize a TTY. -// It can be used to resize container TTYs and -// exec process TTYs too. -type ResizeOptions struct { - Height uint - Width uint -} - -// AttachOptions holds parameters to attach to a container. -type AttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// CommitOptions holds parameters to commit changes into a container. -type CommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *Config -} - -// RemoveOptions holds parameters to remove containers. -type RemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// StartOptions holds parameters to start containers. -type StartOptions struct { - CheckpointID string - CheckpointDir string -} - -// ListOptions holds parameters to list containers with. -type ListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// LogsOptions holds parameters to filter logs with. 
-type LogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go deleted file mode 100644 index b8a690d67..000000000 --- a/vendor/github.com/docker/docker/api/types/filters/errors.go +++ /dev/null @@ -1,24 +0,0 @@ -package filters - -import "fmt" - -// invalidFilter indicates that the provided filter or its value is invalid -type invalidFilter struct { - Filter string - Value []string -} - -func (e invalidFilter) Error() string { - msg := "invalid filter" - if e.Filter != "" { - msg += " '" + e.Filter - if e.Value != nil { - msg = fmt.Sprintf("%s=%s", msg, e.Value) - } - msg += "'" - } - return msg -} - -// InvalidParameter marks this error as ErrInvalidParameter -func (e invalidFilter) InvalidParameter() {} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index 2085ff38f..000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. -type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte("{}"), nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. 
-// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, &invalidFilter{} - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testK, testV, hasValue := strings.Cut(value, "=") - - v, ok := sources[testK] - if !ok { - return false - } - if hasValue && testV != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// GetBoolOrDefault returns a boolean value of the key if the key is present -// and is interpretable as a boolean value. Otherwise the default value is returned. -// Error is not nil only if the filter values are not valid boolean or are conflicting. 
-func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { - fieldValues, ok := args.fields[key] - if !ok { - return defaultValue, nil - } - - if len(fieldValues) == 0 { - return defaultValue, &invalidFilter{key, nil} - } - - isFalse := fieldValues["0"] || fieldValues["false"] - isTrue := fieldValues["1"] || fieldValues["true"] - if isFalse == isTrue { - // Either no or conflicting truthy/falsy value were provided - return defaultValue, &invalidFilter{key, args.Get(key)} - } - return isTrue, nil -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return &invalidFilter{name, nil} - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. 
-func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/image/disk_usage.go b/vendor/github.com/docker/docker/api/types/image/disk_usage.go deleted file mode 100644 index b29d925ca..000000000 --- a/vendor/github.com/docker/docker/api/types/image/disk_usage.go +++ /dev/null @@ -1,8 +0,0 @@ -package image - -// DiskUsage contains disk usage for images. -type DiskUsage struct { - TotalSize int64 - Reclaimable int64 - Items []*Summary -} diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go deleted file mode 100644 index abb7ffd80..000000000 --- a/vendor/github.com/docker/docker/api/types/image/image.go +++ /dev/null @@ -1,47 +0,0 @@ -package image - -import ( - "io" - "time" -) - -// Metadata contains engine-local data about the image. -type Metadata struct { - // LastTagTime is the date and time at which the image was last tagged. - LastTagTime time.Time `json:",omitempty"` -} - -// PruneReport contains the response for Engine API: -// POST "/images/prune" -type PruneReport struct { - ImagesDeleted []DeleteResponse - SpaceReclaimed uint64 -} - -// LoadResponse returns information to the client about a load process. -// -// TODO(thaJeztah): remove this type, and just use an io.ReadCloser -// -// This type was added in https://github.com/moby/moby/pull/18878, related -// to https://github.com/moby/moby/issues/19177; -// -// Make docker load to output json when the response content type is json -// Swarm hijacks the response from docker load and returns JSON rather -// than plain text like the Engine does. This makes the API library to return -// information to figure that out. -// -// However the "load" endpoint unconditionally returns JSON; -// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255 -// -// PR https://github.com/moby/moby/pull/21959 made the response-type depend -// on whether "quiet" was set, but this logic got changed in a follow-up -// https://github.com/moby/moby/pull/25557, which made the JSON response-type -// unconditionally, but the output produced depend on whether"quiet" was set. -// -// We should deprecated the "quiet" option, as it's really a client -// responsibility. 
-type LoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go deleted file mode 100644 index fd038557c..000000000 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ /dev/null @@ -1,122 +0,0 @@ -package image - -import ( - "context" - "io" - - "github.com/docker/docker/api/types/filters" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ImportSource holds source information for ImageImport -type ImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImportOptions holds information to import images from the client host. -type ImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// CreateOptions holds information to create images. -type CreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// PullOptions holds information to pull images. -type PullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - - // PrivilegeFunc is a function that clients can supply to retry operations - // after getting an authorization error. This function returns the registry - // authentication header value in base64 encoded format, or an error if the - // privilege request fails. - // - // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. - PrivilegeFunc func(context.Context) (string, error) - Platform string -} - -// PushOptions holds information to push images. -type PushOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - - // PrivilegeFunc is a function that clients can supply to retry operations - // after getting an authorization error. This function returns the registry - // authentication header value in base64 encoded format, or an error if the - // privilege request fails. - // - // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. - PrivilegeFunc func(context.Context) (string, error) - - // Platform is an optional field that selects a specific platform to push - // when the image is a multi-platform image. - // Using this will only push a single platform-specific manifest. - Platform *ocispec.Platform `json:",omitempty"` -} - -// ListOptions holds parameters to list images with. -type ListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. - Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. 
- ContainerCount bool - - // Manifests indicates whether the image manifests should be returned. - Manifests bool -} - -// RemoveOptions holds parameters to remove images. -type RemoveOptions struct { - Platforms []ocispec.Platform - Force bool - PruneChildren bool -} - -// HistoryOptions holds parameters to get image history. -type HistoryOptions struct { - // Platform from the manifest list to use for history. - Platform *ocispec.Platform -} - -// LoadOptions holds parameters to load images. -type LoadOptions struct { - // Quiet suppresses progress output - Quiet bool - - // Platforms selects the platforms to load if the image is a - // multi-platform image and has multiple variants. - Platforms []ocispec.Platform -} - -type InspectOptions struct { - // Manifests returns the image manifests. - Manifests bool - - // Platform selects the specific platform of a multi-platform image to inspect. - // - // This option is only available for API version 1.49 and up. - Platform *ocispec.Platform -} - -// SaveOptions holds parameters to save images. -type SaveOptions struct { - // Platforms selects the platforms to save if the image is a - // multi-platform image and has multiple variants. - Platforms []ocispec.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go deleted file mode 100644 index 167ac70ab..000000000 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ /dev/null @@ -1,153 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "net" - - "github.com/docker/docker/internal/multierror" -) - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint. - // MacAddress may be used to specify a MAC address when the container is created. - // Once the container is running, it becomes operational data (it may contain a - // generated address). - MacAddress string - DriverOpts map[string]string - - // GwPriority determines which endpoint will provide the default gateway - // for the container. The endpoint with the highest priority will be used. - // If multiple endpoints have the same priority, they are lexicographically - // sorted based on their network name, and the one that sorts first is picked. - GwPriority int - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to - // generate PTR records. - DNSNames []string -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) 
- } - - if len(es.DNSNames) > 0 { - epCopy.DNSNames = make([]string, len(es.DNSNames)) - copy(epCopy.DNSNames, es.DNSNames) - } - - return &epCopy -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - -// NetworkSubnet describes a user-defined subnet for a specific network. It's only used to validate if an -// EndpointIPAMConfig is valid for a specific network. -type NetworkSubnet interface { - // Contains checks whether the NetworkSubnet contains [addr]. - Contains(addr net.IP) bool - // IsStatic checks whether the subnet was statically allocated (ie. user-defined). - IsStatic() bool -} - -// IsInRange checks whether static IP addresses are valid in a specific network. -func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets []NetworkSubnet) error { - var errs []error - - if err := validateEndpointIPAddress(cfg.IPv4Address, v4Subnets); err != nil { - errs = append(errs, err) - } - if err := validateEndpointIPAddress(cfg.IPv6Address, v6Subnets); err != nil { - errs = append(errs, err) - } - - return multierror.Join(errs...) -} - -func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error { - if epAddr == "" { - return nil - } - - var staticSubnet bool - parsedAddr := net.ParseIP(epAddr) - for _, subnet := range ipamSubnets { - if subnet.IsStatic() { - staticSubnet = true - if subnet.Contains(parsedAddr) { - return nil - } - } - } - - if staticSubnet { - return fmt.Errorf("no configured subnet or ip-range contain the IP address %s", epAddr) - } - - return errors.New("user specified IP address is supported only when connecting to networks with user configured subnets") -} - -// Validate checks whether cfg is valid. -func (cfg *EndpointIPAMConfig) Validate() error { - if cfg == nil { - return nil - } - - var errs []error - - if cfg.IPv4Address != "" { - if addr := net.ParseIP(cfg.IPv4Address); addr == nil || addr.To4() == nil || addr.IsUnspecified() { - errs = append(errs, fmt.Errorf("invalid IPv4 address: %s", cfg.IPv4Address)) - } - } - if cfg.IPv6Address != "" { - if addr := net.ParseIP(cfg.IPv6Address); addr == nil || addr.To4() != nil || addr.IsUnspecified() { - errs = append(errs, fmt.Errorf("invalid IPv6 address: %s", cfg.IPv6Address)) - } - } - for _, addr := range cfg.LinkLocalIPs { - if parsed := net.ParseIP(addr); parsed == nil || parsed.IsUnspecified() { - errs = append(errs, fmt.Errorf("invalid link-local IP address: %s", addr)) - } - } - - return multierror.Join(errs...) 
-} diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go deleted file mode 100644 index f319e1402..000000000 --- a/vendor/github.com/docker/docker/api/types/network/ipam.go +++ /dev/null @@ -1,134 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "net/netip" - - "github.com/docker/docker/internal/multierror" -) - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -type ipFamily string - -const ( - ip4 ipFamily = "IPv4" - ip6 ipFamily = "IPv6" -) - -// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of -// errors found. -func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { - if ipam == nil { - return nil - } - - var errs []error - for _, cfg := range ipam.Config { - subnet, err := netip.ParsePrefix(cfg.Subnet) - if err != nil { - errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet)) - continue - } - subnetFamily := ip4 - if subnet.Addr().Is6() { - subnetFamily = ip6 - } - - if !enableIPv6 && subnetFamily == ip6 { - continue - } - - if subnet != subnet.Masked() { - errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked())) - } - - if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 { - errs = append(errs, ipRangeErrs...) - } - - if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil { - errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err)) - } - - for auxName, aux := range cfg.AuxAddress { - if err := validateAddress(aux, subnet, subnetFamily); err != nil { - errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err)) - } - } - } - - if err := multierror.Join(errs...); err != nil { - return fmt.Errorf("invalid network config:\n%w", err) - } - - return nil -} - -func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error { - if ipRange == "" { - return nil - } - prefix, err := netip.ParsePrefix(ipRange) - if err != nil { - return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)} - } - family := ip4 - if prefix.Addr().Is6() { - family = ip6 - } - - if family != subnetFamily { - return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)} - } - - var errs []error - if prefix.Bits() < subnet.Bits() { - errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet)) - } - if prefix != prefix.Masked() { - errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked())) - } - if !subnet.Overlaps(prefix) { - errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet)) - } - - return errs -} - -func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error { - if address == "" { - return nil - } - addr, err := netip.ParseAddr(address) - if err != nil { - return errors.New("invalid address") - } - family := ip4 - if addr.Is6() { - family = ip6 - } - - 
if family != subnetFamily { - return fmt.Errorf("parent subnet is an %s block", subnetFamily) - } - if !subnet.Contains(addr) { - return fmt.Errorf("parent subnet %s doesn't contain this address", subnet) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index d34b8ab72..000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,168 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" - -import ( - "time" - - "github.com/docker/docker/api/types/filters" -) - -const ( - // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack. - NetworkDefault = "default" - // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux) - NetworkHost = "host" - // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows) - NetworkNone = "none" - // NetworkBridge is the name of the default network on Linux - NetworkBridge = "bridge" - // NetworkNat is the name of the default network on Windows - NetworkNat = "nat" -) - -// CreateRequest is the request message sent to the server for network create call. -type CreateRequest struct { - CreateOptions - Name string // Name is the requested name of the network. - - // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client - // package to older daemons. - CheckDuplicate *bool `json:",omitempty"` -} - -// CreateOptions holds options to create a network. -type CreateOptions struct { - Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). - EnableIPv4 *bool `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4. - EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. - IPAM *IPAM // IPAM is the network's IP Address Management. - Internal bool // Internal represents if the network is used internal only. - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. - Options map[string]string // Options specifies the network-specific options to use for when creating the network. - Labels map[string]string // Labels holds metadata specific to the network being created. -} - -// ListOptions holds parameters to filter the list of networks with. -type ListOptions struct { - Filters filters.Args -} - -// InspectOptions holds parameters to inspect network. -type InspectOptions struct { - Scope string - Verbose bool -} - -// ConnectOptions represents the data to be used to connect a container to the -// network. 
-type ConnectOptions struct { - Container string - EndpointConfig *EndpointSettings `json:",omitempty"` -} - -// DisconnectOptions represents the data to be used to disconnect a container -// from the network. -type DisconnectOptions struct { - Container string - Force bool -} - -// Inspect is the body of the "get network" http response message. -type Inspect struct { - Name string // Name is the name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv4 bool // EnableIPv4 represents whether IPv4 is enabled - EnableIPv6 bool // EnableIPv6 represents whether IPv6 is enabled - IPAM IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]ServiceInfo `json:",omitempty"` -} - -// Summary is used as response when listing networks. It currently is an alias -// for [Inspect], but may diverge in the future, as not all information may -// be included when listing networks. -type Summary = Inspect - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// EndpointResource contains network resources allocated and used for a -// container in a network. 
-type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. -func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) -} - -// PruneReport contains the response for Engine API: -// POST "/networks/prune" -type PruneReport struct { - NetworksDeleted []string -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e8..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go deleted file mode 100644 index ebd5e4b9e..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ /dev/null @@ -1,109 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "strings" -) - -// AuthHeader is the name of the header used to send encoded registry -// authorization credentials for registry operations (push/pull). -const AuthHeader = "X-Registry-Auth" - -// RequestAuthConfig is a function interface that clients can supply -// to retry operations after getting an authorization error. -// -// The function must return the [AuthHeader] value ([AuthConfig]), encoded -// in base64url format ([RFC4648, section 5]), which can be decoded by -// [DecodeAuthConfig]. -// -// It must return an error if the privilege request fails. -// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -type RequestAuthConfig func(context.Context) (string, error) - -// AuthConfig contains authorization information for connecting to a Registry. -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} - -// EncodeAuthConfig serializes the auth configuration as a base64url encoded -// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. -// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -func EncodeAuthConfig(authConfig AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", errInvalidParameter{err} - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON -// authentication information as sent through the X-Registry-Auth header. -// -// This function always returns an [AuthConfig], even if an error occurs. It is up -// to the caller to decide if authentication is required, and if the error can -// be ignored. 
-// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { - if authEncoded == "" { - return &AuthConfig{}, nil - } - - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - return decodeAuthConfigFromReader(authJSON) -} - -// DecodeAuthConfigBody decodes authentication information as sent as JSON in the -// body of a request. This function is to provide backward compatibility with old -// clients and API versions. Current clients and API versions expect authentication -// to be provided through the X-Registry-Auth header. -// -// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an -// error occurs. It is up to the caller to decide if authentication is required, -// and if the error can be ignored. -func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { - return decodeAuthConfigFromReader(rdr) -} - -func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { - authConfig := &AuthConfig{} - if err := json.NewDecoder(rdr).Decode(authConfig); err != nil { - // always return an (empty) AuthConfig to increase compatibility with - // the existing API. - return &AuthConfig{}, invalid(err) - } - return authConfig, nil -} - -func invalid(err error) error { - return errInvalidParameter{fmt.Errorf("invalid X-Registry-Auth header: %w", err)} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { return e.error } - -func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e4..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 14c82aaa6..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,122 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.23 - -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet `json:"AllowNondistributableArtifactsCIDRs,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. - AllowNondistributableArtifactsHostnames []string `json:"AllowNondistributableArtifactsHostnames,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. - - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string - - // ExtraFields is for internal use to include deprecated fields on older API versions. - ExtraFields map[string]any `json:"-"` -} - -// MarshalJSON implements a custom marshaler to include legacy fields -// in API responses. -func (sc *ServiceConfig) MarshalJSON() ([]byte, error) { - type tmp ServiceConfig - base, err := json.Marshal((*tmp)(sc)) - if err != nil { - return nil, err - } - var merged map[string]any - _ = json.Unmarshal(base, &merged) - - for k, v := range sc.ExtraFields { - merged[k] = v - } - return json.Marshal(merged) -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error { - var ipnetStr string - if err := json.Unmarshal(b, &ipnetStr); err != nil { - return err - } - _, cidr, err := net.ParseCIDR(ipnetStr) - if err != nil { - return err - } - *ipnet = NetIPNet(*cidr) - return nil -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. 
- Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor ocispec.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []ocispec.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc..000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 292bd7afc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 32aaf0d51..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,808 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -package runtime - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{0} -} -func (m *PluginSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginSpec.Merge(m, src) -} -func (m *PluginSpec) XXX_Size() int { - return m.Size() -} -func (m *PluginSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PluginSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginSpec proto.InternalMessageInfo - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{1} -} -func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginPrivilege) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginPrivilege.Merge(m, src) -} -func (m *PluginPrivilege) XXX_Size() int { - return m.Size() -} -func (m *PluginPrivilege) XXX_DiscardUnknown() { - xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} - -func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } - -var fileDescriptor_22a625af4bc1cc87 = []byte{ - // 225 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, - 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, - 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, - 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, - 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, - 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, - 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, - 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, - 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, - 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, - 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, - 0x00, -} - -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Privileges) > 0 { - for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlugin(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Remote) > 0 { - i -= len(m.Remote) - copy(dAtA[i:], m.Remote) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Value[iNdEx]) - copy(dAtA[i:], m.Value[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - offset -= sovPlugin(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PluginSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlugin(x uint64) (n int) { - return 
sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlugin - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlugin - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index e311b36ba..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/system/disk_usage.go b/vendor/github.com/docker/docker/api/types/system/disk_usage.go deleted file mode 100644 index 99078cf19..000000000 --- a/vendor/github.com/docker/docker/api/types/system/disk_usage.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/volume" -) - -// DiskUsage contains response of Engine API for API 1.49 and greater: -// GET "/system/df" -type DiskUsage struct { - Images *image.DiskUsage - Containers *container.DiskUsage - Volumes *volume.DiskUsage - BuildCache *build.CacheDiskUsage -} diff --git a/vendor/github.com/docker/docker/api/types/system/security_opts.go b/vendor/github.com/docker/docker/api/types/system/security_opts.go deleted file mode 100644 index edff3eb1a..000000000 --- a/vendor/github.com/docker/docker/api/types/system/security_opts.go +++ /dev/null @@ -1,48 +0,0 @@ -package system - -import ( - "errors" - "fmt" - "strings" -) - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a -// type-safe [SecurityOpt]. -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - for _, s := range strings.Split(opt, ",") { - k, v, ok := strings.Cut(s, "=") - if !ok { - return nil, fmt.Errorf("invalid security option %q", s) - } - if k == "" || v == "" { - return nil, errors.New("invalid empty security option") - } - if k == "name" { - secopt.Name = v - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) - } - so = append(so, secopt) - } - return so, nil -} - -// KeyValue holds a key/value pair. -type KeyValue struct { - Key, Value string -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index 443cf82a9..000000000 --- a/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,103 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" -) - -const ( - // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams - MediaTypeRawStream = "application/vnd.docker.raw-stream" - - // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams - MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" -) - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion build.BuilderVersion - - // SwarmStatus provides information about the current swarm status of the - // engine, obtained from the "Swarm" header in the API response. 
- // - // It can be a nil struct if the API version does not provide this header - // in the ping response, or if an error occurred, in which case the client - // should use other ways to get the current swarm status, such as the /swarm - // endpoint. - SwarmStatus *swarm.Status -} - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// DiskUsageObject represents an object type used for disk usage query filtering. -type DiskUsageObject string - -const ( - // ContainerObject represents a container DiskUsageObject. - ContainerObject DiskUsageObject = "container" - // ImageObject represents an image DiskUsageObject. - ImageObject DiskUsageObject = "image" - // VolumeObject represents a volume DiskUsageObject. - VolumeObject DiskUsageObject = "volume" - // BuildCacheObject represents a build-cache DiskUsageObject. - BuildCacheObject DiskUsageObject = "build-cache" -) - -// DiskUsageOptions holds parameters for system disk usage query. -type DiskUsageOptions struct { - // Types specifies what object types to include in the response. If empty, - // all object types are returned. - Types []DiskUsageObject -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*image.Summary - Containers []*container.Summary - Volumes []*volume.Volume - BuildCache []*build.CacheRecord - BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest string - Size int -} diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go deleted file mode 100644 index 8456a4560..000000000 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ /dev/null @@ -1,241 +0,0 @@ -package types - -import ( - "context" - - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/common" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/storage" - "github.com/docker/docker/api/types/swarm" -) - -// IDResponse Response to an API call that returns just an Id. -// -// Deprecated: use either [container.CommitResponse] or [container.ExecCreateResponse]. It will be removed in the next release. -type IDResponse = common.IDResponse - -// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json" -// for API version 1.18 and older. 
-// -// Deprecated: use [container.InspectResponse] or [container.ContainerJSONBase]. It will be removed in the next release. -type ContainerJSONBase = container.ContainerJSONBase - -// ContainerJSON is the response for the GET "/containers/{name:.*}/json" -// endpoint. -// -// Deprecated: use [container.InspectResponse]. It will be removed in the next release. -type ContainerJSON = container.InspectResponse - -// Container contains response of Engine API: -// GET "/containers/json" -// -// Deprecated: use [container.Summary]. -type Container = container.Summary - -// ContainerState stores container's running state -// -// Deprecated: use [container.State]. -type ContainerState = container.State - -// NetworkSettings exposes the network settings in the api. -// -// Deprecated: use [container.NetworkSettings]. -type NetworkSettings = container.NetworkSettings - -// NetworkSettingsBase holds networking state for a container when inspecting it. -// -// Deprecated: use [container.NetworkSettingsBase]. -type NetworkSettingsBase = container.NetworkSettingsBase - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -// -// Deprecated: use [container.DefaultNetworkSettings]. -type DefaultNetworkSettings = container.DefaultNetworkSettings - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json. -// -// Deprecated: use [container.NetworkSettingsSummary]. -type SummaryNetworkSettings = container.NetworkSettingsSummary - -// Health states -const ( - NoHealthcheck = container.NoHealthcheck // Deprecated: use [container.NoHealthcheck]. - Starting = container.Starting // Deprecated: use [container.Starting]. - Healthy = container.Healthy // Deprecated: use [container.Healthy]. - Unhealthy = container.Unhealthy // Deprecated: use [container.Unhealthy]. -) - -// Health stores information about the container's healthcheck results. -// -// Deprecated: use [container.Health]. -type Health = container.Health - -// HealthcheckResult stores information about a single run of a healthcheck probe. -// -// Deprecated: use [container.HealthcheckResult]. -type HealthcheckResult = container.HealthcheckResult - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. -// -// Deprecated: use [container.MountPoint]. -type MountPoint = container.MountPoint - -// Port An open port on a container -// -// Deprecated: use [container.Port]. -type Port = container.Port - -// GraphDriverData Information about the storage driver used to store the container's and -// image's filesystem. -// -// Deprecated: use [storage.DriverData]. -type GraphDriverData = storage.DriverData - -// RootFS returns Image's RootFS description including the layer IDs. -// -// Deprecated: use [image.RootFS]. -type RootFS = image.RootFS - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -// -// Deprecated: use [image.InspectResponse]. -type ImageInspect = image.InspectResponse - -// RequestPrivilegeFunc is a function interface that clients can supply to -// retry operations after getting an authorization error. -// This function returns the registry authentication header value in base64 -// format, or an error if the privilege request fails. -// -// Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. 
-type RequestPrivilegeFunc func(context.Context) (string, error) - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -// -// Deprecated: use [swarm.SecretCreateResponse]. -type SecretCreateResponse = swarm.SecretCreateResponse - -// SecretListOptions holds parameters to list secrets -// -// Deprecated: use [swarm.SecretListOptions]. -type SecretListOptions = swarm.SecretListOptions - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -// -// Deprecated: use [swarm.ConfigCreateResponse]. -type ConfigCreateResponse = swarm.ConfigCreateResponse - -// ConfigListOptions holds parameters to list configs -// -// Deprecated: use [swarm.ConfigListOptions]. -type ConfigListOptions = swarm.ConfigListOptions - -// NodeListOptions holds parameters to list nodes with. -// -// Deprecated: use [swarm.NodeListOptions]. -type NodeListOptions = swarm.NodeListOptions - -// NodeRemoveOptions holds parameters to remove nodes with. -// -// Deprecated: use [swarm.NodeRemoveOptions]. -type NodeRemoveOptions = swarm.NodeRemoveOptions - -// TaskListOptions holds parameters to list tasks with. -// -// Deprecated: use [swarm.TaskListOptions]. -type TaskListOptions = swarm.TaskListOptions - -// ServiceCreateOptions contains the options to use when creating a service. -// -// Deprecated: use [swarm.ServiceCreateOptions]. -type ServiceCreateOptions = swarm.ServiceCreateOptions - -// ServiceUpdateOptions contains the options to be used for updating services. -// -// Deprecated: use [swarm.ServiceUpdateOptions]. -type ServiceUpdateOptions = swarm.ServiceUpdateOptions - -const ( - RegistryAuthFromSpec = swarm.RegistryAuthFromSpec // Deprecated: use [swarm.RegistryAuthFromSpec]. - RegistryAuthFromPreviousSpec = swarm.RegistryAuthFromPreviousSpec // Deprecated: use [swarm.RegistryAuthFromPreviousSpec]. -) - -// ServiceListOptions holds parameters to list services with. -// -// Deprecated: use [swarm.ServiceListOptions]. -type ServiceListOptions = swarm.ServiceListOptions - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -// -// Deprecated: use [swarm.ServiceInspectOptions]. -type ServiceInspectOptions = swarm.ServiceInspectOptions - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -// -// Deprecated: use [swarm.UnlockKeyResponse]. -type SwarmUnlockKeyResponse = swarm.UnlockKeyResponse - -// BuildCache contains information about a build cache record. -// -// Deprecated: deprecated in API 1.49. Use [build.CacheRecord] instead. -type BuildCache = build.CacheRecord - -// BuildCachePruneOptions hold parameters to prune the build cache -// -// Deprecated: use [build.CachePruneOptions]. -type BuildCachePruneOptions = build.CachePruneOptions - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -// -// Deprecated: use [build.CachePruneReport]. -type BuildCachePruneReport = build.CachePruneReport - -// BuildResult contains the image id of a successful build. -// -// Deprecated: use [build.Result]. -type BuildResult = build.Result - -// ImageBuildOptions holds the information -// necessary to build images. -// -// Deprecated: use [build.ImageBuildOptions]. -type ImageBuildOptions = build.ImageBuildOptions - -// ImageBuildOutput defines configuration for exporting a build result -// -// Deprecated: use [build.ImageBuildOutput]. 
-type ImageBuildOutput = build.ImageBuildOutput - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -// -// Deprecated: use [build.ImageBuildResponse]. -type ImageBuildResponse = build.ImageBuildResponse - -// BuilderVersion sets the version of underlying builder to use -// -// Deprecated: use [build.BuilderVersion]. -type BuilderVersion = build.BuilderVersion - -const ( - // BuilderV1 is the first generation builder in docker daemon - // - // Deprecated: use [build.BuilderV1]. - BuilderV1 = build.BuilderV1 - // BuilderBuildKit is builder based on moby/buildkit project - // - // Deprecated: use [build.BuilderBuildKit]. - BuilderBuildKit = build.BuilderBuildKit -) diff --git a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go b/vendor/github.com/docker/docker/api/types/volume/disk_usage.go deleted file mode 100644 index 3d716c6e0..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/disk_usage.go +++ /dev/null @@ -1,8 +0,0 @@ -package volume - -// DiskUsage contains disk usage for volumes. -type DiskUsage struct { - TotalSize int64 - Reclaimable int64 - Items []*Volume -} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go deleted file mode 100644 index 0b9645e00..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ /dev/null @@ -1,15 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -import "github.com/docker/docker/api/types/filters" - -// ListOptions holds parameters to list volumes. -type ListOptions struct { - Filters filters.Args -} - -// PruneReport contains the response for Engine API: -// POST "/volumes/prune" -type PruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go deleted file mode 100644 index f958f80a6..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_update.go +++ /dev/null @@ -1,7 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// UpdateOptions is configuration to update a Volume with. -type UpdateOptions struct { - // Spec is the ClusterVolumeSpec to update the volume to. - Spec *ClusterVolumeSpec `json:"Spec,omitempty"` -} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md deleted file mode 100644 index f8af3ab90..000000000 --- a/vendor/github.com/docker/docker/client/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Go client for the Docker Engine API - -The `docker` command uses this package to communicate with the daemon. It can -also be used by your own Go applications to do anything the command-line -interface does – running containers, pulling images, managing swarms, etc. 
- -For example, to list all containers (the equivalent of `docker ps --all`): - -```go -package main - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/client" -) - -func main() { - apiClient, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - defer apiClient.Close() - - containers, err := apiClient.ContainerList(context.Background(), container.ListOptions{All: true}) - if err != nil { - panic(err) - } - - for _, ctr := range containers { - fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status) - } -} -``` - -[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go deleted file mode 100644 index 51a73cdb2..000000000 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// BuildCancel requests the daemon to cancel the ongoing build request. -func (cli *Client) BuildCancel(ctx context.Context, id string) error { - query := url.Values{} - query.Set("id", id) - - resp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go deleted file mode 100644 index db8fad55d..000000000 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/filters" - "github.com/pkg/errors" -) - -// BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) { - if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil { - return nil, err - } - - query := url.Values{} - if opts.All { - query.Set("all", "1") - } - - if opts.KeepStorage != 0 { - query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) - } - if opts.ReservedSpace != 0 { - query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace))) - } - if opts.MaxUsedSpace != 0 { - query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace))) - } - if opts.MinFreeSpace != 0 { - query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace))) - } - f, err := filters.ToJSON(opts.Filters) - if err != nil { - return nil, errors.Wrap(err, "prune could not marshal filters option") - } - query.Set("filters", f) - - resp, err := cli.post(ctx, "/build/prune", query, nil, nil) - defer ensureReaderClosed(resp) - - if err != nil { - return nil, err - } - - report := build.CachePruneReport{} - if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { - return nil, errors.Wrap(err, "error retrieving disk usage") - } - - return &report, nil -} diff --git a/vendor/github.com/docker/docker/client/checkpoint.go b/vendor/github.com/docker/docker/client/checkpoint.go deleted file mode 100644 index f690f7c95..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - 
"github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointAPIClient defines API client methods for the checkpoints. -// -// Experimental: checkpoint and restore is still an experimental feature, -// and only available if the daemon is running with experimental features -// enabled. -type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error - CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error - CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go deleted file mode 100644 index 7b06fee31..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options checkpoint.CreateOptions) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go deleted file mode 100644 index d15162ea0..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go deleted file mode 100644 index 9e7963f0b..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) { - var checkpoints []checkpoint.Summary - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return checkpoints, err - } - - err = json.NewDecoder(resp.Body).Decode(&checkpoints) - return 
checkpoints, err -} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go deleted file mode 100644 index 9e366ce20..000000000 --- a/vendor/github.com/docker/docker/client/client_deprecated.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import "net/http" - -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// -// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], -// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API -// version negotiation by passing the [WithAPIVersionNegotiation] option instead -// of WithVersion. -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - -// NewEnvClient initializes a new API client based on environment variables. -// See FromEnv for a list of support environment variables. -// -// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option. -func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} diff --git a/vendor/github.com/docker/docker/client/client_interfaces.go b/vendor/github.com/docker/docker/client/client_interfaces.go deleted file mode 100644 index fe4b1af9b..000000000 --- a/vendor/github.com/docker/docker/client/client_interfaces.go +++ /dev/null @@ -1,237 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/system" - "github.com/docker/docker/api/types/volume" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// CommonAPIClient is the common methods between stable and experimental versions of APIClient. -// -// Deprecated: use [APIClient] instead. This type will be an alias for [APIClient] in the next release, and removed after. -type CommonAPIClient = stableAPIClient - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - stableAPIClient - CheckpointAPIClient // CheckpointAPIClient is still experimental. 
-} - -type stableAPIClient interface { - ConfigAPIClient - ContainerAPIClient - DistributionAPIClient - ImageAPIClient - NetworkAPIClient - PluginAPIClient - SystemAPIClient - VolumeAPIClient - ClientVersion() string - DaemonHost() string - HTTPClient() *http.Client - ServerVersion(ctx context.Context) (types.Version, error) - NegotiateAPIVersion(ctx context.Context) - NegotiateAPIVersionPing(types.Ping) - HijackDialer - Dialer() func(context.Context) (net.Conn, error) - Close() error - SwarmManagementAPIClient -} - -// SwarmManagementAPIClient defines all methods for managing Swarm-specific -// objects. -type SwarmManagementAPIClient interface { - SwarmAPIClient - NodeAPIClient - ServiceAPIClient - SecretAPIClient - ConfigAPIClient -} - -// HijackDialer defines methods for a hijack dialer. -type HijackDialer interface { - DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) -} - -// ContainerAPIClient defines API client methods for the containers -type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (container.CommitResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) - ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (container.ExecCreateResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (container.InspectResponse, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (container.InspectResponse, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) - ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, options container.StopOptions) error - ContainerStatPath(ctx context.Context, container, path string) (container.PathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (container.StatsResponseReader, error) - ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error) - ContainerStart(ctx context.Context, container string, options container.StartOptions) error - 
ContainerStop(ctx context.Context, container string, options container.StopOptions) error - ContainerTop(ctx context.Context, container string, arguments []string) (container.TopResponse, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.UpdateResponse, error) - ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) -} - -// DistributionAPIClient defines API client methods for the registry -type DistributionAPIClient interface { - DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) -} - -// ImageAPIClient defines API client methods for the images -type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) - BuildCancel(ctx context.Context, id string) error - ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) - ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) - - ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) - ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) - ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) - ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error) - - ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (image.InspectResponse, error) - ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) ([]image.HistoryResponseItem, error) - ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (image.LoadResponse, error) - ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (io.ReadCloser, error) - - ImageAPIClientDeprecated -} - -// ImageAPIClientDeprecated defines deprecated methods of the ImageAPIClient. -type ImageAPIClientDeprecated interface { - // ImageInspectWithRaw returns the image information and its raw representation. - // - // Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option. 
- ImageInspectWithRaw(ctx context.Context, image string) (image.InspectResponse, []byte, error) -} - -// NetworkAPIClient defines API client methods for the networks -type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) - NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, error) - NetworkInspectWithRaw(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, []byte, error) - NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) - NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (network.PruneReport, error) -} - -// NodeAPIClient defines API client methods for the nodes -type NodeAPIClient interface { - NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) - NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error - NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error -} - -// PluginAPIClient defines API client methods for the plugins -type PluginAPIClient interface { - PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) - PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error - PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error - PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error - PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) - PluginSet(ctx context.Context, name string, args []string) error - PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) - PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error -} - -// ServiceAPIClient defines API client methods for the services -type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options swarm.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) - ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) - TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) -} 
- -// SwarmAPIClient defines API client methods for the swarm -type SwarmAPIClient interface { - SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) - SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) - SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error - SwarmLeave(ctx context.Context, force bool) error - SwarmInspect(ctx context.Context) (swarm.Swarm, error) - SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error -} - -// SystemAPIClient defines API client methods for the system -type SystemAPIClient interface { - Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (system.Info, error) - RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) - Ping(ctx context.Context) (types.Ping, error) -} - -// VolumeAPIClient defines API client methods for the volumes -type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) - VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) - VolumeRemove(ctx context.Context, volumeID string, force bool) error - VolumesPrune(ctx context.Context, pruneFilter filters.Args) (volume.PruneReport, error) - VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error -} - -// SecretAPIClient defines API client methods for secrets -type SecretAPIClient interface { - SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) - SecretRemove(ctx context.Context, id string) error - SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) - SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error -} - -// ConfigAPIClient defines API client methods for configs -type ConfigAPIClient interface { - ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) - ConfigRemove(ctx context.Context, id string) error - ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) - ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error -} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go deleted file mode 100644 index 9fe78ea43..000000000 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows - -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. 
-const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go deleted file mode 100644 index 56572d1a2..000000000 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. -const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go deleted file mode 100644 index 1fbfc21f9..000000000 --- a/vendor/github.com/docker/docker/client/config_create.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigCreate creates a new config. -func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) { - var response swarm.ConfigCreateResponse - if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/configs/create", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go deleted file mode 100644 index 679a42c76..000000000 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigInspectWithRaw returns the config information with raw data -func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - id, err := trimID("config", id) - if err != nil { - return swarm.Config{}, nil, err - } - if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil { - return swarm.Config{}, nil, err - } - resp, err := cli.get(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Config{}, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return swarm.Config{}, nil, err - } - - var config swarm.Config - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&config) - - return config, body, err -} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go deleted file mode 100644 index 67779e98a..000000000 --- a/vendor/github.com/docker/docker/client/config_list.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigList returns the list of configs.
-func (cli *Client) ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/configs", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var configs []swarm.Config - err = json.NewDecoder(resp.Body).Decode(&configs) - return configs, err -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go deleted file mode 100644 index a2955c689..000000000 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ConfigRemove removes a config. -func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - id, err := trimID("config", id) - if err != nil { - return err - } - if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go deleted file mode 100644 index ddb219cf6..000000000 --- a/vendor/github.com/docker/docker/client/config_update.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigUpdate attempts to update a config -func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - id, err := trimID("config", id) - if err != nil { - return err - } - if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go deleted file mode 100644 index 2e7a13e5c..000000000 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ /dev/null @@ -1,65 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. 
-// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. -func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options container.AttachOptions) (types.HijackedResponse, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return types.HijackedResponse{}, err - } - - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - if options.Logs { - query.Set("logs", "1") - } - - return cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{ - "Content-Type": {"text/plain"}, - }) -} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go deleted file mode 100644 index 9bb106f77..000000000 --- a/vendor/github.com/docker/docker/client/container_create.go +++ /dev/null @@ -1,168 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "net/url" - "path" - "sort" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { - var response container.CreateResponse - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. 
- if err := cli.checkVersion(ctx); err != nil { - return response, err - } - - if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { - return response, err - } - if err := cli.NewVersionError(ctx, "1.41", "specify container image platform"); platform != nil && err != nil { - return response, err - } - if err := cli.NewVersionError(ctx, "1.44", "specify health-check start interval"); config != nil && config.Healthcheck != nil && config.Healthcheck.StartInterval != 0 && err != nil { - return response, err - } - if err := cli.NewVersionError(ctx, "1.44", "specify mac-address per network"); hasEndpointSpecificMacAddress(networkingConfig) && err != nil { - return response, err - } - - if hostConfig != nil { - if versions.LessThan(cli.ClientVersion(), "1.25") { - // When using API 1.24 and under, the client is responsible for removing the container - hostConfig.AutoRemove = false - } - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { - // KernelMemory was added in API 1.40, and deprecated in API 1.42 - hostConfig.KernelMemory = 0 - } - if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { - // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize - hostConfig.ConsoleSize = [2]uint{0, 0} - } - if versions.LessThan(cli.ClientVersion(), "1.44") { - for _, m := range hostConfig.Mounts { - if m.BindOptions != nil { - // ReadOnlyNonRecursive can be safely ignored when API < 1.44 - if m.BindOptions.ReadOnlyForceRecursive { - return response, errors.New("bind-recursive=readonly requires API v1.44 or later") - } - if m.BindOptions.NonRecursive && versions.LessThan(cli.ClientVersion(), "1.40") { - return response, errors.New("bind-recursive=disabled requires API v1.40 or later") - } - } - } - } - - hostConfig.CapAdd = normalizeCapabilities(hostConfig.CapAdd) - hostConfig.CapDrop = normalizeCapabilities(hostConfig.CapDrop) - } - - // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified. - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.44") { - config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44. - } - - query := url.Values{} - if p := formatPlatform(platform); p != "" { - query.Set("platform", p) - } - - if containerName != "" { - query.Set("name", containerName) - } - - body := container.CreateRequest{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - resp, err := cli.post(ctx, "/containers/create", query, body, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). -// -// Similar to containerd's platforms.Format(), but does allow components to be -// omitted (e.g. pass "architecture" only, without "os": -// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 -func formatPlatform(platform *ocispec.Platform) string { - if platform == nil { - return "" - } - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} - -// hasEndpointSpecificMacAddress checks whether one of the endpoint in networkingConfig has a MacAddress defined. 
-func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) bool { - if networkingConfig == nil { - return false - } - for _, endpoint := range networkingConfig.EndpointsConfig { - if endpoint.MacAddress != "" { - return true - } - } - return false -} - -// allCapabilities is a magic value for "all capabilities" -const allCapabilities = "ALL" - -// normalizeCapabilities normalizes capabilities to their canonical form, -// removes duplicates, and sorts the results. -// -// It is similar to [github.com/docker/docker/oci/caps.NormalizeLegacyCapabilities], -// but performs no validation based on supported capabilities. -func normalizeCapabilities(caps []string) []string { - var normalized []string - - unique := make(map[string]struct{}) - for _, c := range caps { - c = normalizeCap(c) - if _, ok := unique[c]; ok { - continue - } - unique[c] = struct{}{} - normalized = append(normalized, c) - } - - sort.Strings(normalized) - return normalized -} - -// normalizeCap normalizes a capability to its canonical format by upper-casing -// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL" -// magic-value. -func normalizeCap(cap string) string { - cap = strings.ToUpper(cap) - if cap == allCapabilities { - return cap - } - if !strings.HasPrefix(cap, "CAP_") { - cap = "CAP_" + cap - } - return cap -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go deleted file mode 100644 index a39ec7179..000000000 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ /dev/null @@ -1,81 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, containerID string, options container.ExecOptions) (container.ExecCreateResponse, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return container.ExecCreateResponse{}, err - } - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return container.ExecCreateResponse{}, err - } - - if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil { - return container.ExecCreateResponse{}, err - } - if versions.LessThan(cli.ClientVersion(), "1.42") { - options.ConsoleSize = nil - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, options, nil) - defer ensureReaderClosed(resp) - if err != nil { - return container.ExecCreateResponse{}, err - } - - var response container.ExecCreateResponse - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. 
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config container.ExecAttachOptions) (types.HijackedResponse, error) { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, http.Header{ - "Content-Type": {"application/json"}, - }) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) { - var response container.ExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.Body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go deleted file mode 100644 index 360d52763..000000000 --- a/vendor/github.com/docker/docker/client/container_export.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. -func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return nil, err - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return resp.Body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go deleted file mode 100644 index 600031860..000000000 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return container.InspectResponse{}, err - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return container.InspectResponse{}, err - } - - var response container.InspectResponse - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation. 
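The exec helpers above (ContainerExecCreate, ContainerExecStart, ContainerExecAttach, ContainerExecInspect) are meant to be chained. A rough sketch of that flow against the removed vendored API, assuming a *client.Client; the function name and command are hypothetical, and Tty is requested so the output is a single raw stream rather than the multiplexed format.

package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// runExec creates an exec instance in the container, attaches to it
// (which also starts it), and copies its output to stdout.
func runExec(ctx context.Context, cli *client.Client, containerID string) error {
	created, err := cli.ContainerExecCreate(ctx, containerID, container.ExecOptions{
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
		Tty:          true,
	})
	if err != nil {
		return err
	}
	hijacked, err := cli.ContainerExecAttach(ctx, created.ID, container.ExecAttachOptions{Tty: true})
	if err != nil {
		return err
	}
	defer hijacked.Close()
	_, err = io.Copy(os.Stdout, hijacked.Reader)
	return err
}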
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (container.InspectResponse, []byte, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return container.InspectResponse{}, nil, err - } - - query := url.Values{} - if getSize { - query.Set("size", "1") - } - resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return container.InspectResponse{}, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return container.InspectResponse{}, nil, err - } - - var response container.InspectResponse - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go deleted file mode 100644 index 22767ae68..000000000 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - query := url.Values{} - if signal != "" { - query.Set("signal", signal) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go deleted file mode 100644 index ae30f8d10..000000000 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ /dev/null @@ -1,85 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types/container" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
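The header layout described in the comment above is simple enough to parse by hand. A minimal sketch of reading one frame, using only the format documented there (stdcopy.StdCopy remains the real helper for this); package and function names are illustrative.

package example

import (
	"encoding/binary"
	"io"
)

// copyFrame reads a single frame of the multiplexed stream: an 8-byte
// header whose first byte is the stream type (1 stdout, 2 stderr) and
// whose last four bytes are the big-endian payload size, followed by
// the payload itself. Returns io.EOF when the stream is exhausted.
func copyFrame(src io.Reader, stdout, stderr io.Writer) error {
	var hdr [8]byte
	if _, err := io.ReadFull(src, hdr[:]); err != nil {
		return err
	}
	size := int64(binary.BigEndian.Uint32(hdr[4:8]))
	dst := stdout
	if hdr[0] == 2 {
		dst = stderr
	}
	_, err := io.CopyN(dst, src, size)
	return err
}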
-func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options container.LogsOptions) (io.ReadCloser, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return nil, err - } - - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "until"`) - } - query.Set("until", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.Body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go deleted file mode 100644 index 5cc298401..000000000 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go deleted file mode 100644 index 3176be596..000000000 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" -) - -// ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) { - if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { - return container.PruneReport{}, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return container.PruneReport{}, err - } - - resp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return container.PruneReport{}, err - } - - var report container.PruneReport - if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { - return container.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go deleted file mode 100644 index 0a092310c..000000000 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// 
ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go deleted file mode 100644 index 725c08ad4..000000000 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error { - execID, err := trimID("exec", execID) - if err != nil { - return err - } - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { - // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. - query := url.Values{} - query.Set("h", strconv.FormatUint(uint64(height), 10)) - query.Set("w", strconv.FormatUint(uint64(width), 10)) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go deleted file mode 100644 index 50559ba6e..000000000 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ /dev/null @@ -1,41 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for -// a specific amount of time, given the timeout. -func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. 
- if err := cli.checkVersion(ctx); err != nil { - return err - } - if versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go deleted file mode 100644 index b81ed3ebc..000000000 --- a/vendor/github.com/docker/docker/client/container_start.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - if len(options.CheckpointDir) != 0 { - query.Set("checkpoint-dir", options.CheckpointDir) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go deleted file mode 100644 index a66b90cb2..000000000 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return container.StatsResponseReader{}, err - } - - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return container.StatsResponseReader{}, err - } - - return container.StatsResponseReader{ - Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), - }, nil -} - -// ContainerStatsOneShot gets a single stat entry from a container. 
-// It differs from `ContainerStats` in that the API should not wait to prime the stats -func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return container.StatsResponseReader{}, err - } - - query := url.Values{} - query.Set("stream", "0") - query.Set("one-shot", "1") - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return container.StatsResponseReader{}, err - } - - return container.StatsResponseReader{ - Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), - }, nil -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go deleted file mode 100644 index eb0129ce3..000000000 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerStop stops a container. In case the container fails to stop -// gracefully within a time frame specified by the timeout argument, -// it is forcefully terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return err - } - if versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go deleted file mode 100644 index 12c8b78f6..000000000 --- a/vendor/github.com/docker/docker/client/container_top.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ContainerTop shows process information from within a container. 
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.TopResponse, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return container.TopResponse{}, err - } - - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return container.TopResponse{}, err - } - - var response container.TopResponse - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go deleted file mode 100644 index f602549bb..000000000 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - containerID, err := trimID("container", containerID) - if err != nil { - return err - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go deleted file mode 100644 index 7f0cf6276..000000000 --- a/vendor/github.com/docker/docker/client/container_update.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/container" -) - -// ContainerUpdate updates the resources of a container. -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.UpdateResponse, error) { - containerID, err := trimID("container", containerID) - if err != nil { - return container.UpdateResponse{}, err - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - defer ensureReaderClosed(resp) - if err != nil { - return container.UpdateResponse{}, err - } - - var response container.UpdateResponse - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go deleted file mode 100644 index bda4a9eee..000000000 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ /dev/null @@ -1,122 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "net/url" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ - -// ContainerWait waits until the specified container is in a certain state -// indicated by the given condition, either "not-running" (default), -// "next-exit", or "removed". -// -// If this client's API version is before 1.30, condition is ignored and -// ContainerWait will return immediately with the two channels, as the server -// will wait as if the condition were "not-running". 
-// -// If this client's API version is at least 1.30, ContainerWait blocks until -// the request has been acknowledged by the server (with a response header), -// then returns two channels on which the caller can wait for the exit status -// of the container or an error if there was a problem either beginning the -// wait request or in getting the response. This allows the caller to -// synchronize ContainerWait with other calls, such as specifying a -// "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { - resultC := make(chan container.WaitResponse) - errC := make(chan error, 1) - - containerID, err := trimID("container", containerID) - if err != nil { - errC <- err - return resultC, errC - } - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - errC <- err - return resultC, errC - } - if versions.LessThan(cli.ClientVersion(), "1.30") { - return cli.legacyContainerWait(ctx, containerID) - } - - query := url.Values{} - if condition != "" { - query.Set("condition", string(condition)) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) - if err != nil { - defer ensureReaderClosed(resp) - errC <- err - return resultC, errC - } - - go func() { - defer ensureReaderClosed(resp) - - responseText := bytes.NewBuffer(nil) - stream := io.TeeReader(resp.Body, responseText) - - var res container.WaitResponse - if err := json.NewDecoder(stream).Decode(&res); err != nil { - // NOTE(nicks): The /wait API does not work well with HTTP proxies. - // At any time, the proxy could cut off the response stream. - // - // But because the HTTP status has already been written, the proxy's - // only option is to write a plaintext error message. - // - // If there's a JSON parsing error, read the real error message - // off the body and send it to the client. - if errors.As(err, new(*json.SyntaxError)) { - _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) - } else { - errC <- err - } - return - } - - resultC <- res - }() - - return resultC, errC -} - -// legacyContainerWait returns immediately and doesn't have an option to wait -// until the container is removed. 
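A sketch of the synchronization pattern the comment above describes, written against the removed vendored API: register a "next-exit" wait first, then start the container, then select on the two returned channels. Package and function names are illustrative.

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// startAndWait registers the wait before starting the container so the
// exit cannot be missed, then blocks until either the wait response or
// an error arrives.
func startAndWait(ctx context.Context, cli *client.Client, id string) (int64, error) {
	waitC, errC := cli.ContainerWait(ctx, id, container.WaitConditionNextExit)
	if err := cli.ContainerStart(ctx, id, container.StartOptions{}); err != nil {
		return 0, err
	}
	select {
	case res := <-waitC:
		return res.StatusCode, nil
	case err := <-errC:
		return 0, err
	}
}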
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { - resultC := make(chan container.WaitResponse) - errC := make(chan error) - - go func() { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - errC <- err - return - } - defer ensureReaderClosed(resp) - - var res container.WaitResponse - if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go deleted file mode 100644 index ed788125c..000000000 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" -) - -// DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { - var query url.Values - if len(options.Types) > 0 { - query = url.Values{} - for _, t := range options.Types { - query.Add("type", string(t)) - } - } - - resp, err := cli.get(ctx, "/system/df", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return types.DiskUsage{}, err - } - - var du types.DiskUsage - if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { - return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) - } - return du, nil -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go deleted file mode 100644 index b8654b24f..000000000 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types/registry" -) - -// DistributionInspect returns the image digest with the full manifest. 
-func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) { - if imageRef == "" { - return registry.DistributionInspect{}, objectNotFoundError{object: "distribution", id: imageRef} - } - - if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { - return registry.DistributionInspect{}, err - } - - var headers http.Header - if encodedRegistryAuth != "" { - headers = http.Header{ - registry.AuthHeader: {encodedRegistryAuth}, - } - } - - // Contact the registry to retrieve digest and platform information - resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) - defer ensureReaderClosed(resp) - if err != nil { - return registry.DistributionInspect{}, err - } - - var distributionInspect registry.DistributionInspect - err = json.NewDecoder(resp.Body).Decode(&distributionInspect) - return distributionInspect, err -} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go deleted file mode 100644 index c71d2a088..000000000 --- a/vendor/github.com/docker/docker/client/events.go +++ /dev/null @@ -1,100 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "time" - - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" -) - -// Events returns a stream of events in the daemon. It's up to the caller to close the stream -// by cancelling the context. Once the stream has been completely read an io.EOF error will -// be sent over the error channel. If an error is sent all processing will be stopped. It's up -// to the caller to reopen the stream in the event of an error by reinvoking this method. 
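The lifecycle spelled out in the comment above (the caller closes the stream by cancelling the context, and io.EOF arrives on the error channel when the stream ends) translates into a loop like the following sketch against the removed vendored API; package and function names are illustrative.

package example

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/client"
)

// watchEvents prints events until the stream ends (io.EOF), another
// error occurs, or ctx is cancelled, which is surfaced on the error
// channel as well.
func watchEvents(ctx context.Context, cli *client.Client) error {
	msgs, errs := cli.Events(ctx, events.ListOptions{})
	for {
		select {
		case m := <-msgs:
			fmt.Println(m.Type, m.Action)
		case err := <-errs:
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
	}
}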
-func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) { - messages := make(chan events.Message) - errs := make(chan error, 1) - - started := make(chan struct{}) - go func() { - defer close(errs) - - query, err := buildEventsQueryParams(cli.version, options) - if err != nil { - close(started) - errs <- err - return - } - - resp, err := cli.get(ctx, "/events", query, nil) - if err != nil { - close(started) - errs <- err - return - } - defer resp.Body.Close() - - decoder := json.NewDecoder(resp.Body) - - close(started) - for { - select { - case <-ctx.Done(): - errs <- ctx.Err() - return - default: - var event events.Message - if err := decoder.Decode(&event); err != nil { - errs <- err - return - } - - select { - case messages <- event: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } - } - }() - <-started - - return messages, errs -} - -func buildEventsQueryParams(cliVersion string, options events.ListOptions) (url.Values, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go deleted file mode 100644 index 1aa061eb0..000000000 --- a/vendor/github.com/docker/docker/client/image_create.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/registry" -) - -// ImageCreate creates a new image based on the parent options. -// It returns the JSON content in the response body. 
-func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", ref.Name()) - query.Set("tag", getAPITagFromNamedRef(ref)) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.Body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { - return cli.post(ctx, "/images/create", query, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go deleted file mode 100644 index 5849d85bd..000000000 --- a/vendor/github.com/docker/docker/client/image_import.go +++ /dev/null @@ -1,48 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" -) - -// ImageImport creates a new image based on the source options. -// It returns the JSON content in the response body. -func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { - if ref != "" { - // Check if the given image name can be resolved - if _, err := reference.ParseNormalizedNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - if source.SourceName != "" { - query.Set("fromSrc", source.SourceName) - } - if ref != "" { - query.Set("repo", ref) - } - if options.Tag != "" { - query.Set("tag", options.Tag) - } - if options.Message != "" { - query.Set("message", options.Message) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.Body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go deleted file mode 100644 index d88f0f141..000000000 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ /dev/null @@ -1,76 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageInspect returns the image information. 
-func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts ...ImageInspectOption) (image.InspectResponse, error) { - if imageID == "" { - return image.InspectResponse{}, objectNotFoundError{object: "image", id: imageID} - } - - var opts imageInspectOpts - for _, opt := range inspectOpts { - if err := opt.Apply(&opts); err != nil { - return image.InspectResponse{}, fmt.Errorf("error applying image inspect option: %w", err) - } - } - - query := url.Values{} - if opts.apiOptions.Manifests { - if err := cli.NewVersionError(ctx, "1.48", "manifests"); err != nil { - return image.InspectResponse{}, err - } - query.Set("manifests", "1") - } - - if opts.apiOptions.Platform != nil { - if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil { - return image.InspectResponse{}, err - } - platform, err := encodePlatform(opts.apiOptions.Platform) - if err != nil { - return image.InspectResponse{}, err - } - query.Set("platform", platform) - } - - resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return image.InspectResponse{}, err - } - - buf := opts.raw - if buf == nil { - buf = &bytes.Buffer{} - } - - if _, err := io.Copy(buf, resp.Body); err != nil { - return image.InspectResponse{}, err - } - - var response image.InspectResponse - err = json.Unmarshal(buf.Bytes(), &response) - return response, err -} - -// ImageInspectWithRaw returns the image information and its raw representation. -// -// Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error) { - var buf bytes.Buffer - resp, err := cli.ImageInspect(ctx, imageID, ImageInspectWithRawResponse(&buf)) - if err != nil { - return image.InspectResponse{}, nil, err - } - return resp, buf.Bytes(), err -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go deleted file mode 100644 index e1911eb7e..000000000 --- a/vendor/github.com/docker/docker/client/image_list.go +++ /dev/null @@ -1,67 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/versions" -) - -// ImageList returns a list of images in the docker host. -// -// Experimental: Setting the [options.Manifest] will populate -// [image.Summary.Manifests] with information about image manifests. -// This is experimental and might change in the future without any backward -// compatibility. -func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { - var images []image.Summary - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. 
- if err := cli.checkVersion(ctx); err != nil { - return images, err - } - - query := url.Values{} - - optionFilters := options.Filters - referenceFilters := optionFilters.Get("reference") - if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { - query.Set("filter", referenceFilters[0]) - for _, filterValue := range referenceFilters { - optionFilters.Del("reference", filterValue) - } - } - if optionFilters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.All { - query.Set("all", "1") - } - if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("shared-size", "1") - } - if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, "1.47") { - query.Set("manifests", "1") - } - - resp, err := cli.get(ctx, "/images/json", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return images, err - } - - err = json.NewDecoder(resp.Body).Decode(&images) - return images, err -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go deleted file mode 100644 index d83877d4b..000000000 --- a/vendor/github.com/docker/docker/client/image_load.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. -// -// Platform is an optional parameter that specifies the platform to load from -// the provided multi-platform image. This is only has effect if the input image -// is a multi-platform image. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (image.LoadResponse, error) { - var opts imageLoadOpts - for _, opt := range loadOpts { - if err := opt.Apply(&opts); err != nil { - return image.LoadResponse{}, err - } - } - - query := url.Values{} - query.Set("quiet", "0") - if opts.apiOptions.Quiet { - query.Set("quiet", "1") - } - if len(opts.apiOptions.Platforms) > 0 { - if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { - return image.LoadResponse{}, err - } - - p, err := encodePlatforms(opts.apiOptions.Platforms...) 
- if err != nil { - return image.LoadResponse{}, err - } - query["platform"] = p - } - - resp, err := cli.postRaw(ctx, "/images/load", query, input, http.Header{ - "Content-Type": {"application/x-tar"}, - }) - if err != nil { - return image.LoadResponse{}, err - } - return image.LoadResponse{ - Body: resp.Body, - JSON: resp.Header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go deleted file mode 100644 index 7c354d7b1..000000000 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" -) - -// ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (image.PruneReport, error) { - if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { - return image.PruneReport{}, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return image.PruneReport{}, err - } - - resp, err := cli.post(ctx, "/images/prune", query, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return image.PruneReport{}, err - } - - var report image.PruneReport - if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { - return image.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go deleted file mode 100644 index ae8c807f7..000000000 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ /dev/null @@ -1,64 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - cerrdefs "github.com/containerd/errdefs" - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.PullOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", ref.Name()) - if !options.All { - query.Set("tag", getAPITagFromNamedRef(ref)) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.Body, nil -} - -// getAPITagFromNamedRef returns a tag from the specified reference. 
-// This function is necessary as long as the docker "server" api expects -// digests to be sent as tags and makes a distinction between the name -// and tag/digest part of a reference. -func getAPITagFromNamedRef(ref reference.Named) string { - if digested, ok := ref.(reference.Digested); ok { - return digested.Digest().String() - } - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - return tagged.Tag() - } - return "" -} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go deleted file mode 100644 index 521030440..000000000 --- a/vendor/github.com/docker/docker/client/image_push.go +++ /dev/null @@ -1,72 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - - cerrdefs "github.com/containerd/errdefs" - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/registry" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -func (cli *Client) ImagePush(ctx context.Context, image string, options image.PushOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - query := url.Values{} - if !options.All { - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - } - - if options.Platform != nil { - if err := cli.NewVersionError(ctx, "1.46", "platform"); err != nil { - return nil, err - } - - p := *options.Platform - pJson, err := json.Marshal(p) - if err != nil { - return nil, fmt.Errorf("invalid platform: %v", err) - } - - query.Set("platform", string(pJson)) - } - - resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth) - if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.Body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*http.Response, error) { - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go deleted file mode 100644 index 25c7360df..000000000 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/distribution/reference" - "github.com/pkg/errors" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, source, target string) error { - if _, err := reference.ParseAnyReference(source); err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) - } - - ref, err := 
reference.ParseNormalizedNamed(target) - if err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - ref = reference.TagNameOnly(ref) - - query := url.Values{} - query.Set("repo", ref.Name()) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - - resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go deleted file mode 100644 index 6396f4b60..000000000 --- a/vendor/github.com/docker/docker/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types/system" -) - -// Info returns information about the docker server. -func (cli *Client) Info(ctx context.Context) (system.Info, error) { - var info system.Info - resp, err := cli.get(ctx, "/info", url.Values{}, nil) - defer ensureReaderClosed(resp) - if err != nil { - return info, err - } - - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go deleted file mode 100644 index d3572c1bf..000000000 --- a/vendor/github.com/docker/docker/client/login.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/registry" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns unauthorizedError when the authentication fails. -func (cli *Client) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - defer ensureReaderClosed(resp) - - if err != nil { - return registry.AuthenticateOKBody{}, err - } - - var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go deleted file mode 100644 index fa7cc34fa..000000000 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/network" -) - -// NetworkConnect connects a container to an existent network in the docker host. 
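// Illustrative usage sketch, not part of the vendored code above: connecting an
// existing container to a user-defined network through the NetworkConnect call
// documented above. The network/container names and the alias are placeholders;
// the client is assumed to be built from the environment (DOCKER_HOST etc.)
// with API version negotiation enabled.
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// An empty EndpointSettings would let the daemon choose defaults; Aliases
	// adds an extra DNS name for the container on this network.
	err = cli.NetworkConnect(ctx, "my-network", "my-container", &network.EndpointSettings{
		Aliases: []string{"web"},
	})
	if err != nil {
		log.Fatal(err)
	}
}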
-func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - networkID, err := trimID("network", networkID) - if err != nil { - return err - } - - containerID, err = trimID("container", containerID) - if err != nil { - return err - } - - nc := network.ConnectOptions{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go deleted file mode 100644 index eef951445..000000000 --- a/vendor/github.com/docker/docker/client/network_create.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return network.CreateResponse{}, err - } - - networkCreateRequest := network.CreateRequest{ - CreateOptions: options, - Name: name, - } - if versions.LessThan(cli.version, "1.44") { - enabled := true - networkCreateRequest.CheckDuplicate = &enabled //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. - } - - resp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - defer ensureReaderClosed(resp) - if err != nil { - return network.CreateResponse{}, err - } - - var response network.CreateResponse - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go deleted file mode 100644 index d8051df2f..000000000 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/network" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. 
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - networkID, err := trimID("network", networkID) - if err != nil { - return err - } - - containerID, err = trimID("container", containerID) - if err != nil { - return err - } - - nd := network.DisconnectOptions{ - Container: containerID, - Force: force, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go deleted file mode 100644 index 1387c080a..000000000 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ /dev/null @@ -1,47 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types/network" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, []byte, error) { - networkID, err := trimID("network", networkID) - if err != nil { - return network.Inspect{}, nil, err - } - query := url.Values{} - if options.Verbose { - query.Set("verbose", "true") - } - if options.Scope != "" { - query.Set("scope", options.Scope) - } - - resp, err := cli.get(ctx, "/networks/"+networkID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return network.Inspect{}, nil, err - } - - raw, err := io.ReadAll(resp.Body) - if err != nil { - return network.Inspect{}, nil, err - } - - var nw network.Inspect - err = json.NewDecoder(bytes.NewReader(raw)).Decode(&nw) - return nw, raw, err -} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go deleted file mode 100644 index e1b4fca73..000000000 --- a/vendor/github.com/docker/docker/client/network_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" -) - -// NetworkList returns the list of networks configured in the docker host. 
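// Illustrative usage sketch, not part of the vendored code above: listing the
// networks that match a filter via the NetworkList call documented above. The
// package and function names and the "driver=bridge" filter value are
// placeholders; cli is assumed to be an already-constructed *client.Client.
package examples

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func listBridgeNetworks(ctx context.Context, cli *client.Client) error {
	// Filters are serialized to JSON and sent to the daemon as the "filters"
	// query parameter, as shown in the implementation below.
	summaries, err := cli.NetworkList(ctx, network.ListOptions{
		Filters: filters.NewArgs(filters.Arg("driver", "bridge")),
	})
	if err != nil {
		return err
	}
	for _, n := range summaries {
		fmt.Printf("%s\t%s\n", n.ID, n.Name)
	}
	return nil
}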
-func (cli *Client) NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []network.Summary - resp, err := cli.get(ctx, "/networks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.Body).Decode(&networkResources) - return networkResources, err -} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go deleted file mode 100644 index 90d3679f3..000000000 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" -) - -// NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (network.PruneReport, error) { - if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { - return network.PruneReport{}, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return network.PruneReport{}, err - } - - resp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return network.PruneReport{}, err - } - - var report network.PruneReport - if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { - return network.PruneReport{}, fmt.Errorf("Error retrieving network prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go deleted file mode 100644 index 89fdaaf3a..000000000 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - networkID, err := trimID("network", networkID) - if err != nil { - return err - } - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go deleted file mode 100644 index 5d3343dc4..000000000 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeInspectWithRaw returns the node information. 
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - nodeID, err := trimID("node", nodeID) - if err != nil { - return swarm.Node{}, nil, err - } - resp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Node{}, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return swarm.Node{}, nil, err - } - - var response swarm.Node - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go deleted file mode 100644 index 429eec244..000000000 --- a/vendor/github.com/docker/docker/client/node_list.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/nodes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var nodes []swarm.Node - err = json.NewDecoder(resp.Body).Decode(&nodes) - return nodes, err -} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go deleted file mode 100644 index 10e218661..000000000 --- a/vendor/github.com/docker/docker/client/node_update.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeUpdate updates a Node. -func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - nodeID, err := trimID("node", nodeID) - if err != nil { - return err - } - - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go deleted file mode 100644 index 2ffa5945f..000000000 --- a/vendor/github.com/docker/docker/client/ping.go +++ /dev/null @@ -1,81 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/http" - "path" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/build" - "github.com/docker/docker/api/types/swarm" -) - -// Ping pings the server and returns the value of the "Docker-Experimental", -// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use -// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported -// by the daemon. It ignores internal server errors returned by the API, which -// may be returned if the daemon is in an unhealthy state, but returns errors -// for other non-success status codes, failing to connect to the API, or failing -// to parse the API response. 
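// Illustrative usage sketch, not part of the vendored code above: reading the
// negotiated server details from the Ping call described above. Field usage
// mirrors parsePingResponse below; the package and function names are
// placeholders and cli is assumed to be an already-constructed *client.Client.
package examples

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func reportServer(ctx context.Context, cli *client.Client) error {
	ping, err := cli.Ping(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("API version: %s, OS type: %s, experimental: %v\n",
		ping.APIVersion, ping.OSType, ping.Experimental)
	if ping.SwarmStatus != nil {
		// SwarmStatus is only populated when the daemon reports a "Swarm" header.
		fmt.Printf("swarm node state: %s (manager: %v)\n",
			ping.SwarmStatus.NodeState, ping.SwarmStatus.ControlAvailable)
	}
	return nil
}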
-func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { - var ping types.Ping - - // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() - // because ping requests are used during API version negotiation, so we want - // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - resp, err := cli.doRequest(req) - if err != nil { - if IsErrConnectionFailed(err) { - return ping, err - } - // We managed to connect, but got some error; continue and try GET request. - } else { - defer ensureReaderClosed(resp) - switch resp.StatusCode { - case http.StatusOK, http.StatusInternalServerError: - // Server handled the request, so parse the response - return parsePingResponse(cli, resp) - } - } - - // HEAD failed; fallback to GET. - req.Method = http.MethodGet - resp, err = cli.doRequest(req) - defer ensureReaderClosed(resp) - if err != nil { - return ping, err - } - return parsePingResponse(cli, resp) -} - -func parsePingResponse(cli *Client, resp *http.Response) (types.Ping, error) { - if resp == nil { - return types.Ping{}, nil - } - - var ping types.Ping - if resp.Header == nil { - return ping, cli.checkResponseErr(resp) - } - ping.APIVersion = resp.Header.Get("Api-Version") - ping.OSType = resp.Header.Get("Ostype") - if resp.Header.Get("Docker-Experimental") == "true" { - ping.Experimental = true - } - if bv := resp.Header.Get("Builder-Version"); bv != "" { - ping.BuilderVersion = build.BuilderVersion(bv) - } - if si := resp.Header.Get("Swarm"); si != "" { - state, role, _ := strings.Cut(si, "/") - ping.SwarmStatus = &swarm.Status{ - NodeState: swarm.LocalNodeState(state), - ControlAvailable: role == "manager", - } - } - return ping, cli.checkResponseErr(resp) -} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go deleted file mode 100644 index b95dbaf68..000000000 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginCreate creates a plugin -func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { - headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/x-tar") - - query := url.Values{} - query.Set("name", createOptions.RepoName) - - resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go deleted file mode 100644 index 9fabe77bf..000000000 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginDisable disables a plugin -func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { - name, err := trimID("plugin", name) - if err != nil { - return err - } - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) - 
ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go deleted file mode 100644 index 492d0bcff..000000000 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// PluginEnable enables a plugin -func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { - name, err := trimID("plugin", name) - if err != nil { - return err - } - query := url.Values{} - query.Set("timeout", strconv.Itoa(options.Timeout)) - - resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go deleted file mode 100644 index 8f107a760..000000000 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types" -) - -// PluginInspectWithRaw inspects an existing plugin -func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - name, err := trimID("plugin", name) - if err != nil { - return nil, nil, err - } - resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, nil, err - } - var p types.Plugin - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&p) - return &p, body, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go deleted file mode 100644 index 8553961ba..000000000 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ /dev/null @@ -1,117 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "io" - "net/http" - "net/url" - - cerrdefs "github.com/containerd/errdefs" - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" -) - -// PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (_ io.ReadCloser, retErr error) { - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - // set name for plugin pull, if empty should default to remote reference - query.Set("name", name) - - resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) - if err != nil { - return nil, err - } - - name = resp.Header.Get("Docker-Plugin-Name") - - pr, pw := io.Pipe() - go func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.Body) - if err != nil { - _ = pw.CloseWithError(err) - return - } - defer func() { - if retErr != nil { - delResp, _ := 
cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(delResp) - } - }() - if len(options.Args) > 0 { - if err := cli.PluginSet(ctx, name, options.Args); err != nil { - _ = pw.CloseWithError(err) - return - } - } - - if options.Disabled { - _ = pw.Close() - return - } - - enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - _ = pw.CloseWithError(enableErr) - }() - return pr, nil -} - -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { - return cli.get(ctx, "/plugins/privileges", query, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} - -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (*http.Response, error) { - return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} - -func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { - resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) - if privilegeErr != nil { - ensureReaderClosed(resp) - return nil, privilegeErr - } - options.RegistryAuth = newAuthHeader - resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - } - if err != nil { - ensureReaderClosed(resp) - return nil, err - } - - var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.Body).Decode(&privileges); err != nil { - ensureReaderClosed(resp) - return nil, err - } - ensureReaderClosed(resp) - - if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(ctx, privileges) - if err != nil { - return nil, err - } - if !accept { - return nil, errors.Errorf("permission denied while installing plugin %s", options.RemoteRef) - } - } - return privileges, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go deleted file mode 100644 index 03bcf7621..000000000 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - query := url.Values{} - - if filter.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return plugins, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/plugins", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return plugins, err - } - - err = json.NewDecoder(resp.Body).Decode(&plugins) - return plugins, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go deleted file mode 100644 index da15e449d..000000000 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ 
/dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - - "github.com/docker/docker/api/types/registry" -) - -// PluginPush pushes a plugin to a registry -func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - name, err := trimID("plugin", name) - if err != nil { - return nil, err - } - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) - if err != nil { - return nil, err - } - return resp.Body, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go deleted file mode 100644 index 6ee107e3c..000000000 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginRemove removes a plugin -func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { - name, err := trimID("plugin", name) - if err != nil { - return err - } - - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go deleted file mode 100644 index e2a79838d..000000000 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" -) - -// PluginSet modifies settings for an existing plugin -func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { - name, err := trimID("plugin", name) - if err != nil { - return err - } - - resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go deleted file mode 100644 index 4abb29cf0..000000000 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ /dev/null @@ -1,47 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" -) - -// PluginUpgrade upgrades a plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) { - name, err := trimID("plugin", name) - if err != nil { - return nil, err - } - - if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil { - return nil, err - } - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.Body, nil -} - -func (cli *Client) tryPluginUpgrade(ctx context.Context, query 
url.Values, privileges types.PluginPrivileges, name, registryAuth string) (*http.Response, error) { - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go deleted file mode 100644 index aee051b90..000000000 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretCreate creates a new secret. -func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) { - if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { - return swarm.SecretCreateResponse{}, err - } - resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.SecretCreateResponse{}, err - } - - var response swarm.SecretCreateResponse - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go deleted file mode 100644 index fdabc197f..000000000 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretInspectWithRaw returns the secret information with raw data -func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - id, err := trimID("secret", id) - if err != nil { - return swarm.Secret{}, nil, err - } - if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { - return swarm.Secret{}, nil, err - } - resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Secret{}, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return swarm.Secret{}, nil, err - } - - var secret swarm.Secret - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&secret) - - return secret, body, err -} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go deleted file mode 100644 index b158d99ed..000000000 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// SecretList returns the list of secrets. 
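// Illustrative usage sketch, not part of the vendored code above: listing the
// secrets whose name matches a filter via the SecretList call documented above
// (API 1.25+ per the version check in the implementation). The "name" filter
// key and the identifier names are placeholders; cli is assumed to be an
// already-constructed *client.Client.
package examples

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func listSecretsByName(ctx context.Context, cli *client.Client, name string) error {
	secrets, err := cli.SecretList(ctx, swarm.SecretListOptions{
		Filters: filters.NewArgs(filters.Arg("name", name)),
	})
	if err != nil {
		return err
	}
	for _, s := range secrets {
		fmt.Printf("%s\t%s\n", s.ID, s.Spec.Name)
	}
	return nil
}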
-func (cli *Client) SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/secrets", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var secrets []swarm.Secret - err = json.NewDecoder(resp.Body).Decode(&secrets) - return secrets, err -} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go deleted file mode 100644 index 7ea2acbf5..000000000 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// SecretRemove removes a secret. -func (cli *Client) SecretRemove(ctx context.Context, id string) error { - id, err := trimID("secret", id) - if err != nil { - return err - } - if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go deleted file mode 100644 index 60d21a6f2..000000000 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretUpdate attempts to update a secret. -func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - id, err := trimID("secret", id) - if err != nil { - return err - } - if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go deleted file mode 100644 index 892e9004f..000000000 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// ServiceInspectWithRaw returns the service information and the raw data. 
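// Illustrative usage sketch, not part of the vendored code above: inspecting a
// service with defaults filled in, using ServiceInspectWithRaw as documented
// above. The function name is a placeholder; cli is assumed to be an
// already-constructed *client.Client.
package examples

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func inspectService(ctx context.Context, cli *client.Client, serviceID string) error {
	svc, raw, err := cli.ServiceInspectWithRaw(ctx, serviceID, swarm.ServiceInspectOptions{
		InsertDefaults: true, // ask the daemon to populate default values
	})
	if err != nil {
		return err
	}
	fmt.Printf("service %s (version %d), raw payload: %d bytes\n",
		svc.Spec.Name, svc.Version.Index, len(raw))
	return nil
}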
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts swarm.ServiceInspectOptions) (swarm.Service, []byte, error) { - serviceID, err := trimID("service", serviceID) - if err != nil { - return swarm.Service{}, nil, err - } - - query := url.Values{} - query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - resp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Service{}, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return swarm.Service{}, nil, err - } - - var response swarm.Service - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go deleted file mode 100644 index 019873bb6..000000000 --- a/vendor/github.com/docker/docker/client/service_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - if options.Status { - query.Set("status", "true") - } - - resp, err := cli.get(ctx, "/services", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var services []swarm.Service - err = json.NewDecoder(resp.Body).Decode(&services) - return services, err -} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go deleted file mode 100644 index 6e0cbee49..000000000 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types/container" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ServiceLogs returns the logs generated by a service in an io.ReadCloser. -// It's up to the caller to close the stream. 
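// Illustrative usage sketch, not part of the vendored code above: streaming
// service logs to stdout with the ServiceLogs call documented above. The caller
// owns the returned stream and must close it; option values and names here are
// placeholders, and cli is assumed to be an already-constructed *client.Client.
package examples

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func tailServiceLogs(ctx context.Context, cli *client.Client, serviceID string) error {
	logs, err := cli.ServiceLogs(ctx, serviceID, container.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
		Timestamps: true,
		Tail:       "100", // only the last 100 lines, then follow
	})
	if err != nil {
		return err
	}
	defer logs.Close()

	// When the service does not use a TTY the stream is multiplexed; production
	// code would typically demultiplex it rather than copy the raw bytes.
	_, err = io.Copy(os.Stdout, logs)
	return err
}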
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) { - serviceID, err := trimID("service", serviceID) - if err != nil { - return nil, err - } - - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.Body, nil -} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go deleted file mode 100644 index 93c949e44..000000000 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ServiceRemove kills and removes a service. -func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { - serviceID, err := trimID("service", serviceID) - if err != nil { - return err - } - - resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go deleted file mode 100644 index e0c1a2664..000000000 --- a/vendor/github.com/docker/docker/client/service_update.go +++ /dev/null @@ -1,89 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" -) - -// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. -// It should be the value as set *before* the update. You can find this value in the Meta field -// of swarm.Service, which can be found using ServiceInspectWithRaw. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { - serviceID, err := trimID("service", serviceID) - if err != nil { - return swarm.ServiceUpdateResponse{}, err - } - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. 
- if err := cli.checkVersion(ctx); err != nil { - return swarm.ServiceUpdateResponse{}, err - } - - query := url.Values{} - if options.RegistryAuthFrom != "" { - query.Set("registryAuthFrom", options.RegistryAuthFrom) - } - - if options.Rollback != "" { - query.Set("rollback", options.Rollback) - } - - query.Set("version", version.String()) - - if err := validateServiceSpec(service); err != nil { - return swarm.ServiceUpdateResponse{}, err - } - - // ensure that the image is tagged - var resolveWarning string - switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - } - - headers := http.Header{} - if versions.LessThan(cli.version, "1.30") { - // the custom "version" header was used by engine API before 20.10 - // (API 1.30) to switch between client- and server-side lookup of - // image digests. - headers["version"] = []string{cli.version} - } - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.ServiceUpdateResponse{}, err - } - - var response swarm.ServiceUpdateResponse - err = json.NewDecoder(resp.Body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } - - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go deleted file mode 100644 index 6e30daf61..000000000 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmGetUnlockKey retrieves the swarm's unlock key. -func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) { - resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.UnlockKeyResponse{}, err - } - - var response swarm.UnlockKeyResponse - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go deleted file mode 100644 index 3dcb2a5b5..000000000 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInit initializes the swarm. 
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - resp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - defer ensureReaderClosed(resp) - if err != nil { - return "", err - } - - var response string - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go deleted file mode 100644 index 3d5a8a042..000000000 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInspect inspects the swarm. -func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - resp, err := cli.get(ctx, "/swarm", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Swarm{}, err - } - - var response swarm.Swarm - err = json.NewDecoder(resp.Body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go deleted file mode 100644 index a1cf0455d..000000000 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmJoin joins the swarm. -func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { - resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go deleted file mode 100644 index 90ca84b36..000000000 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// SwarmLeave leaves the swarm. -func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { - query := url.Values{} - if force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go deleted file mode 100644 index 745d64d5b..000000000 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUnlock unlocks locked swarm. -func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - resp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go deleted file mode 100644 index 9fde7d75e..000000000 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUpdate updates the swarm. 
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { - query := url.Values{} - query.Set("version", version.String()) - query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) - query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) - resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go deleted file mode 100644 index 37668bd27..000000000 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// TaskInspectWithRaw returns the task information and its raw representation. -func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - taskID, err := trimID("task", taskID) - if err != nil { - return swarm.Task{}, nil, err - } - - resp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Task{}, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return swarm.Task{}, nil, err - } - - var response swarm.Task - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go deleted file mode 100644 index 5d540c383..000000000 --- a/vendor/github.com/docker/docker/client/task_list.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// TaskList returns the list of tasks. -func (cli *Client) TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/tasks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var tasks []swarm.Task - err = json.NewDecoder(resp.Body).Decode(&tasks) - return tasks, err -} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go deleted file mode 100644 index 9dcb977b3..000000000 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types/container" - timetypes "github.com/docker/docker/api/types/time" -) - -// TaskLogs returns the logs generated by a task in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.Body, nil -} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go deleted file mode 100644 index 4566fd98e..000000000 --- a/vendor/github.com/docker/docker/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// ServerVersion returns information of the docker client and server host. -func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.Body).Decode(&server) - return server, err -} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go deleted file mode 100644 index bedb3abbb..000000000 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volume.Volume{}, err - } - - var vol volume.Volume - err = json.NewDecoder(resp.Body).Decode(&vol) - return vol, err -} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go deleted file mode 100644 index ce32bbbb7..000000000 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeInspect returns the information about a specific volume in the docker host. 
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { - vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return vol, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { - volumeID, err := trimID("volume", volumeID) - if err != nil { - return volume.Volume{}, nil, err - } - - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volume.Volume{}, nil, err - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return volume.Volume{}, nil, err - } - - var vol volume.Volume - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&vol) - return vol, body, err -} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go deleted file mode 100644 index de6ce23a4..000000000 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/volume" -) - -// VolumeList returns the volumes configured in the docker host. -func (cli *Client) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return volume.ListResponse{}, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volume.ListResponse{}, err - } - - var volumes volume.ListResponse - err = json.NewDecoder(resp.Body).Decode(&volumes) - return volumes, err -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go deleted file mode 100644 index 7da148fea..000000000 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/volume" -) - -// VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (volume.PruneReport, error) { - if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { - return volume.PruneReport{}, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return volume.PruneReport{}, err - } - - resp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volume.PruneReport{}, err - } - - var report volume.PruneReport - if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { - return volume.PruneReport{}, fmt.Errorf("Error retrieving volume prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go deleted file mode 100644 index eefd9ce43..000000000 --- 
a/vendor/github.com/docker/docker/client/volume_remove.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/versions" -) - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { - volumeID, err := trimID("volume", volumeID) - if err != nil { - return err - } - - query := url.Values{} - if force { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return err - } - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - query.Set("force", "1") - } - } - resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go deleted file mode 100644 index c91d5e984..000000000 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" -) - -// VolumeUpdate updates a volume. This only works for Cluster Volumes, and -// only some fields can be updated. -func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { - volumeID, err := trimID("volume", volumeID) - if err != nil { - return err - } - if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil { - return err - } - - query := url.Values{} - query.Set("version", version.String()) - - resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go deleted file mode 100644 index 6334edb60..000000000 --- a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code below was largely copied from golang.org/x/mod@v0.22; -// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go -// with some additional methods added. - -// Package lazyregexp is a thin wrapper over regexp, allowing the use of global -// regexp variables without forcing them to be compiled at init. -package lazyregexp - -import ( - "os" - "regexp" - "strings" - "sync" -) - -// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be -// compiled the first time it is needed. 
-type Regexp struct { - str string - once sync.Once - rx *regexp.Regexp -} - -func (r *Regexp) re() *regexp.Regexp { - r.once.Do(r.build) - return r.rx -} - -func (r *Regexp) build() { - r.rx = regexp.MustCompile(r.str) - r.str = "" -} - -func (r *Regexp) FindSubmatch(s []byte) [][]byte { - return r.re().FindSubmatch(s) -} - -func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string { - return r.re().FindAllStringSubmatch(s, n) -} - -func (r *Regexp) FindStringSubmatch(s string) []string { - return r.re().FindStringSubmatch(s) -} - -func (r *Regexp) FindStringSubmatchIndex(s string) []int { - return r.re().FindStringSubmatchIndex(s) -} - -func (r *Regexp) ReplaceAllString(src, repl string) string { - return r.re().ReplaceAllString(src, repl) -} - -func (r *Regexp) FindString(s string) string { - return r.re().FindString(s) -} - -func (r *Regexp) FindAllString(s string, n int) []string { - return r.re().FindAllString(s, n) -} - -func (r *Regexp) MatchString(s string) bool { - return r.re().MatchString(s) -} - -func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { - return r.re().ReplaceAllStringFunc(src, repl) -} - -func (r *Regexp) SubexpNames() []string { - return r.re().SubexpNames() -} - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -// New creates a new lazy regexp, delaying the compiling work until it is first -// needed. If the code is being run as part of tests, the regexp compiling will -// happen immediately. -func New(str string) *Regexp { - lr := &Regexp{str: str} - if inTest { - // In tests, always compile the regexps early. - lr.re() - } - return lr -} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go deleted file mode 100644 index e899f4de8..000000000 --- a/vendor/github.com/docker/docker/internal/multierror/multierror.go +++ /dev/null @@ -1,46 +0,0 @@ -package multierror - -import ( - "strings" -) - -// Join is a drop-in replacement for errors.Join with better formatting. -func Join(errs ...error) error { - n := 0 - for _, err := range errs { - if err != nil { - n++ - } - } - if n == 0 { - return nil - } - e := &joinError{ - errs: make([]error, 0, n), - } - for _, err := range errs { - if err != nil { - e.errs = append(e.errs, err) - } - } - return e -} - -type joinError struct { - errs []error -} - -func (e *joinError) Error() string { - if len(e.errs) == 1 { - return strings.TrimSpace(e.errs[0].Error()) - } - stringErrs := make([]string, 0, len(e.errs)) - for _, subErr := range e.errs { - stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t")) - } - return "* " + strings.Join(stringErrs, "\n* ") -} - -func (e *joinError) Unwrap() []error { - return e.errs -} diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index 4049d780c..000000000 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,240 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. 
-package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. - - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := ParsePort(portStr) - return port -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp", "sctp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - 
exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -func splitParts(rawport string) (string, string, string) { - parts := strings.Split(rawport, ":") - n := len(parts) - containerPort := parts[n-1] - - switch n { - case 1: - return "", "", containerPort - case 2: - return "", parts[0], containerPort - case 3: - return parts[0], parts[1], containerPort - default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort - } -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func ParsePortSpec(rawPort string) ([]PortMapping, error) { - var proto string - ip, hostPort, containerPort := splitParts(rawPort) - proto, containerPort = SplitProtoPort(containerPort) - - if ip != "" && ip[0] == '[' { - // Strip [] from IPV6 addresses - rawIP, _, err := net.SplitHostPort(ip + ":") - if err != nil { - return nil, fmt.Errorf("invalid IP address %v: %w", ip, err) - } - ip = rawIP - } - if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("invalid IP address: %s", ip) - } - if containerPort == "" { - return nil, fmt.Errorf("no port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. 
- if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: ip, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index e4b53e8a3..000000000 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,33 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("empty string specified for ports") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("invalid range specified for port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index b6eed145e..000000000 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// Less sorts the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. 
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok && len(binding) > 0 { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go index c897cb02a..f04980e40 100644 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -9,6 +9,8 @@ import ( // GetProxyEnv allows access to the uppercase and the lowercase forms of // proxy-related variables. See the Go specification for details on these // variables. https://golang.org/pkg/net/http/ +// +// Deprecated: this function was used as helper for [DialerFromEnvironment] and is no longer used. It will be removed in the next release. func GetProxyEnv(key string) string { proxyValue := os.Getenv(strings.ToUpper(key)) if proxyValue == "" { @@ -19,10 +21,11 @@ func GetProxyEnv(key string) string { // DialerFromEnvironment was previously used to configure a net.Dialer to route // connections through a SOCKS proxy. -// DEPRECATED: SOCKS proxies are now supported by configuring only +// +// Deprecated: SOCKS proxies are now supported by configuring only // http.Transport.Proxy, and no longer require changing http.Transport.Dial. -// Therefore, only sockets.ConfigureTransport() needs to be called, and any -// sockets.DialerFromEnvironment() calls can be dropped. +// Therefore, only [sockets.ConfigureTransport] needs to be called, and any +// [sockets.DialerFromEnvironment] calls can be dropped. func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) { return direct, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index b0eae239d..611729786 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -2,13 +2,19 @@ package sockets import ( + "context" "errors" + "fmt" "net" "net/http" + "syscall" "time" ) -const defaultTimeout = 10 * time.Second +const ( + defaultTimeout = 10 * time.Second + maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +) // ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. var ErrProtocolNotAvailable = errors.New("protocol not available") @@ -35,3 +41,26 @@ func ConfigureTransport(tr *http.Transport, proto, addr string) error { } return nil } + +// DialPipe connects to a Windows named pipe. It is not supported on +// non-Windows platforms. 
+// +// Deprecated: use [github.com/Microsoft/go-winio.DialPipe] or [github.com/Microsoft/go-winio.DialPipeContext]. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return dialPipe(addr, timeout) +} + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + dialer := &net.Dialer{ + Timeout: defaultTimeout, + } + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, proto, addr) + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 78a34a980..913d2f00d 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,37 +3,16 @@ package sockets import ( - "context" - "fmt" "net" "net/http" "syscall" "time" ) -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) - -func configureUnixTransport(tr *http.Transport, proto, addr string) error { - if len(addr) > maxUnixSocketPathSize { - return fmt.Errorf("unix socket path %q is too long", addr) - } - // No need for compression in local communications. - tr.DisableCompression = true - dialer := &net.Dialer{ - Timeout: defaultTimeout, - } - tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { - return dialer.DialContext(ctx, proto, addr) - } - return nil -} - func configureNpipeTransport(tr *http.Transport, proto, addr string) error { return ErrProtocolNotAvailable } -// DialPipe connects to a Windows named pipe. -// This is not supported on other OSes. -func DialPipe(_ string, _ time.Duration) (net.Conn, error) { +func dialPipe(_ string, _ time.Duration) (net.Conn, error) { return nil, syscall.EAFNOSUPPORT } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go index 7acafc5a2..6d6beb385 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -9,10 +9,6 @@ import ( "github.com/Microsoft/go-winio" ) -func configureUnixTransport(tr *http.Transport, proto, addr string) error { - return ErrProtocolNotAvailable -} - func configureNpipeTransport(tr *http.Transport, proto, addr string) error { // No need for compression in local communications. tr.DisableCompression = true @@ -22,7 +18,6 @@ func configureNpipeTransport(tr *http.Transport, proto, addr string) error { return nil } -// DialPipe connects to a Windows named pipe. -func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { +func dialPipe(addr string, timeout time.Duration) (net.Conn, error) { return winio.DialPipe(addr, &timeout) } diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index b9233521e..e736f71d3 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,5 +1,3 @@ -//go:build !windows - /* Package sockets is a simple unix domain socket wrapper. 
@@ -57,26 +55,6 @@ import ( // SockOption sets up socket file's creating option type SockOption func(string) error -// WithChown modifies the socket file's uid and gid -func WithChown(uid, gid int) SockOption { - return func(path string) error { - if err := os.Chown(path, uid, gid); err != nil { - return err - } - return nil - } -} - -// WithChmod modifies socket file's access mode. -func WithChmod(mask os.FileMode) SockOption { - return func(path string) error { - if err := os.Chmod(path, mask); err != nil { - return err - } - return nil - } -} - // NewUnixSocketWithOpts creates a unix socket with the specified options. // By default, socket permissions are 0000 (i.e.: no access for anyone); pass // WithChmod() and WithChown() to set the desired ownership and permissions. @@ -90,22 +68,7 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error return nil, err } - // net.Listen does not allow for permissions to be set. As a result, when - // specifying custom permissions ("WithChmod()"), there is a short time - // between creating the socket and applying the permissions, during which - // the socket permissions are Less restrictive than desired. - // - // To work around this limitation of net.Listen(), we temporarily set the - // umask to 0777, which forces the socket to be created with 000 permissions - // (i.e.: no access for anyone). After that, WithChmod() must be used to set - // the desired permissions. - // - // We don't use "defer" here, to reset the umask to its original value as soon - // as possible. Ideally we'd be able to detect if WithChmod() was passed as - // an option, and skip changing umask if default permissions are used. - origUmask := syscall.Umask(0o777) - l, err := net.Listen("unix", path) - syscall.Umask(origUmask) + l, err := listenUnix(path) if err != nil { return nil, err } @@ -119,8 +82,3 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error return l, nil } - -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path string, gid int) (net.Listener, error) { - return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0o660)) -} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket_unix.go b/vendor/github.com/docker/go-connections/sockets/unix_socket_unix.go new file mode 100644 index 000000000..a41a71654 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket_unix.go @@ -0,0 +1,54 @@ +//go:build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// WithChown modifies the socket file's uid and gid +func WithChown(uid, gid int) SockOption { + return func(path string) error { + if err := os.Chown(path, uid, gid); err != nil { + return err + } + return nil + } +} + +// WithChmod modifies socket file's access mode. +func WithChmod(mask os.FileMode) SockOption { + return func(path string) error { + if err := os.Chmod(path, mask); err != nil { + return err + } + return nil + } +} + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0o660)) +} + +func listenUnix(path string) (net.Listener, error) { + // net.Listen does not allow for permissions to be set. 
As a result, when + // specifying custom permissions ("WithChmod()"), there is a short time + // between creating the socket and applying the permissions, during which + // the socket permissions are Less restrictive than desired. + // + // To work around this limitation of net.Listen(), we temporarily set the + // umask to 0777, which forces the socket to be created with 000 permissions + // (i.e.: no access for anyone). After that, WithChmod() must be used to set + // the desired permissions. + // + // We don't use "defer" here, to reset the umask to its original value as soon + // as possible. Ideally we'd be able to detect if WithChmod() was passed as + // an option, and skip changing umask if default permissions are used. + origUmask := syscall.Umask(0o777) + l, err := net.Listen("unix", path) + syscall.Umask(origUmask) + return l, err +} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket_windows.go b/vendor/github.com/docker/go-connections/sockets/unix_socket_windows.go new file mode 100644 index 000000000..5ec29e059 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket_windows.go @@ -0,0 +1,7 @@ +package sockets + +import "net" + +func listenUnix(path string) (net.Listener, error) { + return net.Listen("unix", path) +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 606c98a38..8b0264f68 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -34,51 +34,37 @@ type Options struct { // the system pool will be used. ExclusiveRootPools bool MinVersion uint16 - // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted. - // - // Deprecated: Use of encrypted TLS private keys has been deprecated, and - // will be removed in a future release. Golang has deprecated support for - // legacy PEM encryption (as specified in RFC 1423), as it is insecure by - // design (see https://go-review.googlesource.com/c/go/+/264159). - Passphrase string -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, } // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls // options struct but wants to use a commonly accepted set of TLS cipher suites, with // known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) +var DefaultServerAcceptedCiphers = defaultCipherSuites + +// defaultCipherSuites is shared by both client and server as the default set. +var defaultCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsConfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.2 - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } - - for _, op := range ops { - op(tlsConfig) - } - - return tlsConfig + return defaultConfig(ops...) 
} // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + return defaultConfig(ops...) +} + +// defaultConfig is the default config used by both client and server TLS configuration. +func defaultConfig(ops ...func(*tls.Config)) *tls.Config { tlsConfig := &tls.Config{ - // Prefer TLS1.2 as the client minimum + // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, + CipherSuites: defaultCipherSuites, } for _, op := range ops { @@ -92,13 +78,13 @@ func ClientDefault(ops ...func(*tls.Config)) *tls.Config { func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { // If we should verify the server, we need to load a trusted ca var ( - certPool *x509.CertPool - err error + pool *x509.CertPool + err error ) if exclusivePool { - certPool = x509.NewCertPool() + pool = x509.NewCertPool() } else { - certPool, err = SystemCertPool() + pool, err = SystemCertPool() if err != nil { return nil, fmt.Errorf("failed to read system certificates: %v", err) } @@ -107,10 +93,10 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pemData) { + if !pool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } - return certPool, nil + return pool, nil } // allTLSVersions lists all the TLS versions and is used by the code that validates @@ -144,34 +130,32 @@ func adjustMinVersion(options Options, config *tls.Config) error { return nil } -// IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when trying to decrypt a TLS private key. +// errEncryptedKeyDeprecated is produced when we encounter an encrypted +// (password-protected) key. From https://go-review.googlesource.com/c/go/+/264159; // -// Deprecated: Use of encrypted TLS private keys has been deprecated, and -// will be removed in a future release. Golang has deprecated support for -// legacy PEM encryption (as specified in RFC 1423), as it is insecure by -// design (see https://go-review.googlesource.com/c/go/+/264159). -func IsErrEncryptedKey(err error) bool { - return errors.Is(err, x509.IncorrectPasswordError) -} +// > Legacy PEM encryption as specified in RFC 1423 is insecure by design. Since +// > it does not authenticate the ciphertext, it is vulnerable to padding oracle +// > attacks that can let an attacker recover the plaintext +// > +// > It's unfortunate that we don't implement PKCS#8 encryption so we can't +// > recommend an alternative but PEM encryption is so broken that it's worth +// > deprecating outright. +// +// Also see https://docs.docker.com/go/deprecated/ +var errEncryptedKeyDeprecated = errors.New("private key is encrypted; encrypted private keys are obsolete, and not supported") // getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. -// If the private key is encrypted, 'passphrase' is used to decrypted the -// private key. -func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { +// It returns an error if the file could not be decoded or was protected by +// a passphrase. 
+func getPrivateKey(keyBytes []byte) ([]byte, error) { // this section makes some small changes to code from notary/tuf/utils/x509.go pemBlock, _ := pem.Decode(keyBytes) if pemBlock == nil { return nil, fmt.Errorf("no valid private key found") } - var err error if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) - if err != nil { - return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) - } - keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + return nil, errEncryptedKeyDeprecated } return keyBytes, nil @@ -195,7 +179,7 @@ func getCert(options Options) ([]tls.Certificate, error) { return nil, err } - prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + prKeyBytes, err = getPrivateKey(prKeyBytes) if err != nil { return nil, err } @@ -210,7 +194,7 @@ func getCert(options Options) ([]tls.Certificate, error) { // Client returns a TLS configuration meant to be used by a client. func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault() + tlsConfig := defaultConfig() tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify if !options.InsecureSkipVerify && options.CAFile != "" { CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) @@ -235,7 +219,7 @@ func Client(options Options) (*tls.Config, error) { // Server returns a TLS configuration meant to be used by a server. func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault() + tlsConfig := defaultConfig() tlsConfig.ClientAuth = options.ClientAuth tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index a82f9fa52..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a2..000000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. 
- -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c208..000000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90da..000000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f327..000000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. 
-type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. 
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 24552483c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. 
-// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
-func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09..000000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d90..000000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e1730..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 9581ccd30..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,205 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - sizVar := SizeVarint(uint64(siz)) - p.grow(siz + sizVar) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c1..000000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f57f..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae120e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 80db1c155..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa39190..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad9083..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29d..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed02..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 28da1475f..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,610 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - if isOneofMessage { - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd93..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f7..000000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index f8babdefa..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3009 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if isOneofMessage { - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] - pp, err = m.XXX_Marshal(pp, p.deterministic) - p.buf = append(p.buf, pp...) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index 60dcf70d1..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,676 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case isSlice && !isPointer: // E.g. []pb.T - mergeInfo := getMergeInfo(tf) - zero := reflect.Zero(tf) - mfi.merge = func(dst, src pointer) { - // TODO: Make this faster? 
- dstsp := dst.asPointerTo(f.Type) - dsts := dstsp.Elem() - srcs := src.asPointerTo(f.Type).Elem() - for i := 0; i < srcs.Len(); i++ { - dsts = reflect.Append(dsts, zero) - srcElement := srcs.Index(i).Addr() - dstElement := dsts.Index(dsts.Len() - 1).Addr() - mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) - } - if dsts.IsNil() { - dsts = reflect.MakeSlice(f.Type, 0, 0) - } - dstsp.Elem().Set(dsts) - } - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index 937229386..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2249 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. 
-func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. 
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if len(oneofFields) > 0 { - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 - - atomic.StoreInt32(&u.initialized, 1) -} - -// setTag stores the unmarshal information for the given tag. -// tag = tag # for field -// field/unmarshal = unmarshal info for that field. -// reqMask = if required, bitmask for field position in required field list. 0 otherwise. -// name = short name of the field. -func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { - i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} - n := u.typ.NumField() - if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - 
case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := 
int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, 
io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. 
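An aside on the pattern above: the fixed32 and fixed64 unmarshalers assemble values with explicit shift-and-or expressions, which is plain little-endian decoding. Below is a minimal sketch of the same operation using encoding/binary from the standard library; the helper names are illustrative and not part of the vendored code or of this patch.

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// readFixed32 and readFixed64 decode protobuf fixed32/fixed64 fields,
// which are stored little-endian on the wire, matching the shift
// expressions in the unmarshalers above. Each returns the value, the
// remaining bytes, and whether enough input was available.
func readFixed32(b []byte) (uint32, []byte, bool) {
	if len(b) < 4 {
		return 0, b, false
	}
	return binary.LittleEndian.Uint32(b), b[4:], true
}

func readFixed64(b []byte) (uint64, []byte, bool) {
	if len(b) < 8 {
		return 0, b, false
	}
	return binary.LittleEndian.Uint64(b), b[8:], true
}

func main() {
	// Round-trip a float64 the way a "double" field travels as fixed64.
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, math.Float64bits(1.5))
	v, rest, ok := readFixed64(b)
	fmt.Println(math.Float64frombits(v), len(rest), ok) // 1.5 0 true
}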
- v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - 
// that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
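An aside on the wire format handled above: encodeVarint and the unrolled decodeVarint that follows implement base-128 varints, and the expressions of the form int64(x>>1) ^ int64(x)<<63>>63 seen earlier are zigzag decoding for sint64/sint32 fields. Below is a compact sketch of both transforms, assuming only the Go standard library; the function names are illustrative, not taken from the vendored code.

package main

import "fmt"

// appendVarint encodes x as a base-128 varint, low 7 bits first,
// mirroring encodeVarint above.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// readVarint is the loop form of the unrolled decoder that follows;
// like it, this returns (0, 0) on truncated or over-long input.
func readVarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		if i == 10 {
			return 0, 0 // longer than any valid 64-bit varint
		}
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// Zigzag maps signed integers to unsigned ones so that small negative
// values still encode to short varints.
func zigzagEncode(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }
func zigzagDecode(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	buf := appendVarint(nil, zigzagEncode(-3))
	x, n := readVarint(buf)
	fmt.Println(buf, zigzagDecode(x), n) // [5] -3 1
}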
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. 
Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { 
- return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) - 
slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 87416afe9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,930 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index f85c0cc81..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := &timestamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f6542..000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func timestampFromProto(ts *timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func timestampProto(t time.Time) (*timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go deleted file mode 100644 index 38439fa99..000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b64..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf85..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/moby/moby/api/LICENSE similarity index 100% rename from vendor/github.com/docker/docker/LICENSE rename to vendor/github.com/moby/moby/api/LICENSE diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go similarity index 85% rename from vendor/github.com/docker/docker/api/types/blkiodev/blkio.go rename to vendor/github.com/moby/moby/api/types/blkiodev/blkio.go index bf3463b90..931ae10ab 100644 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go @@ -1,4 +1,4 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" +package blkiodev import "fmt" diff --git a/vendor/github.com/moby/moby/api/types/build/build.go b/vendor/github.com/moby/moby/api/types/build/build.go new file mode 100644 index 000000000..db9839773 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/build.go @@ -0,0 +1,16 @@ +package build + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// Result contains the image id of a successful build. +type Result struct { + ID string +} diff --git a/vendor/github.com/docker/docker/api/types/build/cache.go b/vendor/github.com/moby/moby/api/types/build/cache.go similarity index 66% rename from vendor/github.com/docker/docker/api/types/build/cache.go rename to vendor/github.com/moby/moby/api/types/build/cache.go index 42c840457..39dd23a5f 100644 --- a/vendor/github.com/docker/docker/api/types/build/cache.go +++ b/vendor/github.com/moby/moby/api/types/build/cache.go @@ -2,18 +2,12 @@ package build import ( "time" - - "github.com/docker/docker/api/types/filters" ) // CacheRecord contains information about a build cache record. type CacheRecord struct { // ID is the unique ID of the build cache record. ID string - // Parent is the ID of the parent build cache record. - // - // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. - Parent string `json:"Parent,omitempty"` // Parents is the list of parent build cache record IDs. Parents []string `json:" Parents,omitempty"` // Type is the cache record type. @@ -33,17 +27,6 @@ type CacheRecord struct { UsageCount int } -// CachePruneOptions hold parameters to prune the build cache. -type CachePruneOptions struct { - All bool - ReservedSpace int64 - MaxUsedSpace int64 - MinFreeSpace int64 - Filters filters.Args - - KeepStorage int64 // Deprecated: deprecated in API 1.48. -} - // CachePruneReport contains the response for Engine API: // POST "/build/prune" type CachePruneReport struct { diff --git a/vendor/github.com/moby/moby/api/types/build/disk_usage.go b/vendor/github.com/moby/moby/api/types/build/disk_usage.go new file mode 100644 index 000000000..3613797db --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package build + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for build cache resources. 
+// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active build cache records. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of build cache records. + // + Items []CacheRecord `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive build cache records. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all build cache records. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by build cache records. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go b/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go new file mode 100644 index 000000000..c363783f2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go @@ -0,0 +1,8 @@ +package checkpoint + +// CreateRequest holds parameters to create a checkpoint from a container. +type CreateRequest struct { + CheckpointID string + CheckpointDir string + Exit bool +} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/list.go b/vendor/github.com/moby/moby/api/types/checkpoint/list.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/checkpoint/list.go rename to vendor/github.com/moby/moby/api/types/checkpoint/list.go diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/moby/moby/api/types/common/error_response.go similarity index 73% rename from vendor/github.com/docker/docker/api/types/error_response.go rename to vendor/github.com/moby/moby/api/types/common/error_response.go index dc942d9d9..b49d3eea0 100644 --- a/vendor/github.com/docker/docker/api/types/error_response.go +++ b/vendor/github.com/moby/moby/api/types/common/error_response.go @@ -1,9 +1,13 @@ -package types +// Code generated by go-swagger; DO NOT EDIT. + +package common // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // ErrorResponse Represents an error. +// Example: {"message":"Something went wrong."} +// // swagger:model ErrorResponse type ErrorResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/moby/moby/api/types/common/error_response_ext.go similarity index 86% rename from vendor/github.com/docker/docker/api/types/error_response_ext.go rename to vendor/github.com/moby/moby/api/types/common/error_response_ext.go index f84f034cd..c92dfe4b1 100644 --- a/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ b/vendor/github.com/moby/moby/api/types/common/error_response_ext.go @@ -1,4 +1,4 @@ -package types +package common // Error returns the error message func (e ErrorResponse) Error() string { diff --git a/vendor/github.com/docker/docker/api/types/common/id_response.go b/vendor/github.com/moby/moby/api/types/common/id_response.go similarity index 87% rename from vendor/github.com/docker/docker/api/types/common/id_response.go rename to vendor/github.com/moby/moby/api/types/common/id_response.go index 22e8c60a4..7dfe4bf12 100644 --- a/vendor/github.com/docker/docker/api/types/common/id_response.go +++ b/vendor/github.com/moby/moby/api/types/common/id_response.go @@ -1,9 +1,12 @@ +// Code generated by go-swagger; DO NOT EDIT. + package common // This file was generated by the swagger tool. 
// Editing this file might prove futile when you re-run the swagger generate command // IDResponse Response to an API call that returns just an Id +// // swagger:model IDResponse type IDResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/container/change_type.go b/vendor/github.com/moby/moby/api/types/container/change_type.go similarity index 87% rename from vendor/github.com/docker/docker/api/types/container/change_type.go rename to vendor/github.com/moby/moby/api/types/container/change_type.go index fe8d6d369..52fc99235 100644 --- a/vendor/github.com/docker/docker/api/types/container/change_type.go +++ b/vendor/github.com/moby/moby/api/types/container/change_type.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. diff --git a/vendor/github.com/docker/docker/api/types/container/change_types.go b/vendor/github.com/moby/moby/api/types/container/change_types.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/container/change_types.go rename to vendor/github.com/moby/moby/api/types/container/change_types.go diff --git a/vendor/github.com/docker/docker/api/types/container/commit.go b/vendor/github.com/moby/moby/api/types/container/commit.go similarity index 76% rename from vendor/github.com/docker/docker/api/types/container/commit.go rename to vendor/github.com/moby/moby/api/types/container/commit.go index 6fd1b0ead..c5aab26ff 100644 --- a/vendor/github.com/docker/docker/api/types/container/commit.go +++ b/vendor/github.com/moby/moby/api/types/container/commit.go @@ -1,6 +1,6 @@ package container -import "github.com/docker/docker/api/types/common" +import "github.com/moby/moby/api/types/common" // CommitResponse response for the commit API call, containing the ID of the // image that was produced. diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/moby/moby/api/types/container/config.go similarity index 57% rename from vendor/github.com/docker/docker/api/types/container/config.go rename to vendor/github.com/moby/moby/api/types/container/config.go index d6b03e8b2..78fa9f910 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/moby/moby/api/types/container/config.go @@ -1,11 +1,10 @@ -package container // import "github.com/docker/docker/api/types/container" +package container import ( "time" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" dockerspec "github.com/moby/docker-image-spec/specs-go/v1" + "github.com/moby/moby/api/types/network" ) // MinimumDuration puts a minimum on user configured duration. @@ -14,24 +13,6 @@ import ( // Docker interprets it as 3 nanoseconds. const MinimumDuration = 1 * time.Millisecond -// StopOptions holds the options to stop or restart a container. -type StopOptions struct { - // Signal (optional) is the signal to send to the container to (gracefully) - // stop it before forcibly terminating the container with SIGKILL after the - // timeout expires. If not value is set, the default (SIGTERM) is used. - Signal string `json:",omitempty"` - - // Timeout (optional) is the timeout (in seconds) to wait for the container - // to stop gracefully before forcibly terminating it with SIGKILL. - // - // - Use nil to use the default timeout (10 seconds). - // - Use '-1' to wait indefinitely. 
- // - Use '0' to not wait for the container to exit gracefully, and - // immediately proceeds to forcibly terminating the container. - // - Other positive values are used as timeout (in seconds). - Timeout *int `json:",omitempty"` -} - // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig = dockerspec.HealthcheckConfig @@ -48,26 +29,22 @@ type Config struct { AttachStdin bool // Attach the standard input, makes possible user interaction AttachStdout bool // Attach the standard output AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + ExposedPorts network.PortSet `json:",omitempty"` // List of exposed ports Tty bool // Attach standard streams to a tty, including stdin if it is not closed. OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container + Cmd []string // Command to run when starting the container Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + Entrypoint []string // Entrypoint to run when starting the container NetworkDisabled bool `json:",omitempty"` // Is network disabled - // Mac Address of the container. - // - // Deprecated: this field is deprecated since API v1.44. Use EndpointSettings.MacAddress instead. 
- MacAddress string `json:",omitempty"` - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT + OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT } diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/moby/moby/api/types/container/container.go similarity index 69% rename from vendor/github.com/docker/docker/api/types/container/container.go rename to vendor/github.com/moby/moby/api/types/container/container.go index a191ca8bd..bffb3de87 100644 --- a/vendor/github.com/docker/docker/api/types/container/container.go +++ b/vendor/github.com/moby/moby/api/types/container/container.go @@ -1,25 +1,14 @@ package container import ( - "io" "os" "time" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/storage" + "github.com/moby/moby/api/types/mount" + "github.com/moby/moby/api/types/storage" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// -// Deprecated: use [UpdateResponse]. This alias will be removed in the next release. -type ContainerUpdateOKBody = UpdateResponse - -// ContainerTopOKBody OK response to ContainerTop operation -// -// Deprecated: use [TopResponse]. This alias will be removed in the next release. -type ContainerTopOKBody = TopResponse - // PruneReport contains the response for Engine API: // POST "/containers/prune" type PruneReport struct { @@ -38,30 +27,10 @@ type PathStat struct { LinkTarget string `json:"linkTarget"` } -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats -// for a container, as produced by the GET "/stats" endpoint. -// -// The OSType field is set to the server's platform to allow -// platform-specific handling of the response. -// -// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse]. -type StatsResponseReader struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - // MountPoint represents a mount point configuration inside the container. // This is used for reporting the mountpoints in use by a container. type MountPoint struct { - // Type is the type of mount, see `Type` definitions in - // github.com/docker/docker/api/types/mount.Type + // Type is the type of mount, see [mount.Type] definitions for details. 
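// A minimal sketch of the container.Config changes shown in the hunk above:
// with the moby API types, Cmd, Entrypoint and Shell are plain []string (no
// strslice wrapper) and ExposedPorts now uses network.PortSet. The values
// below are hypothetical and only use fields visible in this diff;
// ExposedPorts is left out rather than assuming the PortSet constructor.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/moby/api/types/container"
)

func main() {
	cfg := container.Config{
		Image:      "alpine:latest",
		Entrypoint: []string{"/bin/sh", "-c"},
		Cmd:        []string{"echo hello"},
		Env:        []string{"EXAMPLE=1"},
		Labels:     map[string]string{"com.example.vendor": "demo"},
	}

	// The struct still marshals to the JSON array/object shape used by
	// container create requests, so the field-type change is source-level only.
	out, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}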
Type mount.Type `json:",omitempty"` // Name is the name reference to the underlying data defined by `Source` @@ -128,7 +97,7 @@ type Summary struct { ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` Command string Created int64 - Ports []Port + Ports []PortSummary SizeRw int64 `json:",omitempty"` SizeRootFs int64 `json:",omitempty"` Labels map[string]string @@ -138,20 +107,14 @@ type Summary struct { NetworkMode string `json:",omitempty"` Annotations map[string]string `json:",omitempty"` } + Health *HealthSummary `json:",omitempty"` NetworkSettings *NetworkSettingsSummary Mounts []MountPoint } -// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json" -// for API version 1.18 and older. -// -// TODO(thaJeztah): combine ContainerJSONBase and InspectResponse into a single struct. -// The split between ContainerJSONBase (ContainerJSONBase) and InspectResponse (InspectResponse) -// was done in commit 6deaa58ba5f051039643cedceee97c8695e2af74 (https://github.com/moby/moby/pull/13675). -// ContainerJSONBase contained all fields for API < 1.19, and InspectResponse -// held fields that were added in API 1.19 and up. Given that the minimum -// supported API version is now 1.24, we no longer use the separate type. -type ContainerJSONBase struct { +// InspectResponse is the response for the GET "/containers/{name:.*}/json" +// endpoint. +type InspectResponse struct { ID string `json:"Id"` Created string Path string @@ -171,15 +134,15 @@ type ContainerJSONBase struct { AppArmorProfile string ExecIDs []string HostConfig *HostConfig - GraphDriver storage.DriverData + + // GraphDriver contains information about the container's graph driver. + GraphDriver *storage.DriverData `json:"GraphDriver,omitempty"` + + // Storage contains information about the storage used for the container's filesystem. + Storage *storage.Storage `json:"Storage,omitempty"` + SizeRw *int64 `json:",omitempty"` SizeRootFs *int64 `json:",omitempty"` -} - -// InspectResponse is the response for the GET "/containers/{name:.*}/json" -// endpoint. -type InspectResponse struct { - *ContainerJSONBase Mounts []MountPoint Config *Config NetworkSettings *NetworkSettings diff --git a/vendor/github.com/docker/docker/api/types/container/create_request.go b/vendor/github.com/moby/moby/api/types/container/create_request.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/container/create_request.go rename to vendor/github.com/moby/moby/api/types/container/create_request.go index e98dd6ad4..decb208af 100644 --- a/vendor/github.com/docker/docker/api/types/container/create_request.go +++ b/vendor/github.com/moby/moby/api/types/container/create_request.go @@ -1,6 +1,6 @@ package container -import "github.com/docker/docker/api/types/network" +import "github.com/moby/moby/api/types/network" // CreateRequest is the request message sent to the server for container // create calls. 
It is a config wrapper that holds the container [Config] diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/moby/moby/api/types/container/create_response.go similarity index 70% rename from vendor/github.com/docker/docker/api/types/container/create_response.go rename to vendor/github.com/moby/moby/api/types/container/create_response.go index aa0e7f7d0..39d761aa9 100644 --- a/vendor/github.com/docker/docker/api/types/container/create_response.go +++ b/vendor/github.com/moby/moby/api/types/container/create_response.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. @@ -5,15 +7,18 @@ package container // CreateResponse ContainerCreateResponse // -// OK response to ContainerCreate operation +// # OK response to ContainerCreate operation +// // swagger:model CreateResponse type CreateResponse struct { // The ID of the created container + // Example: ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743 // Required: true ID string `json:"Id"` // Warnings encountered when creating the container + // Example: [] // Required: true Warnings []string `json:"Warnings"` } diff --git a/vendor/github.com/moby/moby/api/types/container/disk_usage.go b/vendor/github.com/moby/moby/api/types/container/disk_usage.go new file mode 100644 index 000000000..c36721d3b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage information for container resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active containers. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of container summaries. + // + Items []Summary `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive containers. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all containers. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by containers. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/errors.go b/vendor/github.com/moby/moby/api/types/container/errors.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/container/errors.go rename to vendor/github.com/moby/moby/api/types/container/errors.go diff --git a/vendor/github.com/moby/moby/api/types/container/exec.go b/vendor/github.com/moby/moby/api/types/container/exec.go new file mode 100644 index 000000000..6895926ae --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/exec.go @@ -0,0 +1,35 @@ +package container + +import "github.com/moby/moby/api/types/common" + +// ExecCreateResponse is the response for a successful exec-create request. +// It holds the ID of the exec that was created. +// +// TODO(thaJeztah): make this a distinct type. +type ExecCreateResponse = common.IDResponse + +// ExecInspectResponse is the API response for the "GET /exec/{id}/json" +// endpoint and holds information about and exec. 
+type ExecInspectResponse struct { + ID string `json:"ID"` + Running bool `json:"Running"` + ExitCode *int `json:"ExitCode"` + ProcessConfig *ExecProcessConfig + OpenStdin bool `json:"OpenStdin"` + OpenStderr bool `json:"OpenStderr"` + OpenStdout bool `json:"OpenStdout"` + CanRemove bool `json:"CanRemove"` + ContainerID string `json:"ContainerID"` + DetachKeys []byte `json:"DetachKeys"` + Pid int `json:"Pid"` +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/exec_create_request.go b/vendor/github.com/moby/moby/api/types/container/exec_create_request.go new file mode 100644 index 000000000..dd7437cd2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/exec_create_request.go @@ -0,0 +1,17 @@ +package container + +// ExecCreateRequest is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecCreateRequest struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} diff --git a/vendor/github.com/moby/moby/api/types/container/exec_start_request.go b/vendor/github.com/moby/moby/api/types/container/exec_start_request.go new file mode 100644 index 000000000..4c2ba0a77 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/exec_start_request.go @@ -0,0 +1,12 @@ +package container + +// ExecStartRequest is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartRequest struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go b/vendor/github.com/moby/moby/api/types/container/filesystem_change.go similarity index 90% rename from vendor/github.com/docker/docker/api/types/container/filesystem_change.go rename to vendor/github.com/moby/moby/api/types/container/filesystem_change.go index 9e9c2ad1d..b9ec83e52 100644 --- a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go +++ b/vendor/github.com/moby/moby/api/types/container/filesystem_change.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. 
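A rough sketch of how a client could populate the new dedicated exec request types introduced above. Only fields defined in ExecCreateRequest and ExecStartRequest are used; the endpoint path mentioned in the comment is an assumption based on the existing Engine API, not something defined in this patch.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/moby/api/types/container"
)

func main() {
	// Request body for the exec-create endpoint (assumed to be
	// POST /containers/{id}/exec), using only fields defined above.
	req := container.ExecCreateRequest{
		User:         "root",
		Tty:          true,
		AttachStdout: true,
		AttachStderr: true,
		Env:          []string{"TERM=xterm"},
		WorkingDir:   "/",
		Cmd:          []string{"sh", "-c", "echo hello"},
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))

	// Follow-up start request; Detach/Tty/ConsoleSize mirror ExecStartRequest.
	start := container.ExecStartRequest{Detach: false, Tty: true}
	startBody, _ := json.Marshal(start)
	fmt.Println(string(startBody))
}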
diff --git a/vendor/github.com/docker/docker/api/types/container/health.go b/vendor/github.com/moby/moby/api/types/container/health.go similarity index 81% rename from vendor/github.com/docker/docker/api/types/container/health.go rename to vendor/github.com/moby/moby/api/types/container/health.go index 96e91cc8d..1a1ba84b4 100644 --- a/vendor/github.com/docker/docker/api/types/container/health.go +++ b/vendor/github.com/moby/moby/api/types/container/health.go @@ -7,9 +7,7 @@ import ( ) // HealthStatus is a string representation of the container's health. -// -// It currently is an alias for string, but may become a distinct type in future. -type HealthStatus = string +type HealthStatus string // Health states const ( @@ -26,6 +24,12 @@ type Health struct { Log []*HealthcheckResult // Log contains the last few results (oldest first) } +// HealthSummary stores a summary of the container's healthcheck results. +type HealthSummary struct { + Status HealthStatus // Status is one of [NoHealthcheck], [Starting], [Healthy] or [Unhealthy]. + FailingStreak int // FailingStreak is the number of consecutive failures +} + // HealthcheckResult stores information about a single run of a healthcheck probe type HealthcheckResult struct { Start time.Time // Start is the time this check started @@ -35,7 +39,10 @@ type HealthcheckResult struct { } var validHealths = []string{ - NoHealthcheck, Starting, Healthy, Unhealthy, + string(NoHealthcheck), + string(Starting), + string(Healthy), + string(Unhealthy), } // ValidateHealthStatus checks if the provided string is a valid diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/moby/moby/api/types/container/hostconfig.go similarity index 93% rename from vendor/github.com/docker/docker/api/types/container/hostconfig.go rename to vendor/github.com/moby/moby/api/types/container/hostconfig.go index 87ca82683..0f889c651 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig.go @@ -1,16 +1,15 @@ -package container // import "github.com/docker/docker/api/types/container" +package container import ( "errors" "fmt" + "net/netip" "strings" - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" "github.com/docker/go-units" + "github.com/moby/moby/api/types/blkiodev" + "github.com/moby/moby/api/types/mount" + "github.com/moby/moby/api/types/network" ) // CgroupnsMode represents the cgroup namespace mode of the container @@ -391,17 +390,12 @@ type Resources struct { Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers - - // KernelMemory specifies the kernel memory limit (in bytes) for the container. - // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. 
- KernelMemory int64 `json:",omitempty"` - KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*Ulimit // List of ulimits to be set in the container + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count @@ -427,7 +421,7 @@ type HostConfig struct { ContainerIDFile string // File (path) where the containerId is written LogConfig LogConfig // Configuration of the logs for this container NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + PortBindings network.PortMap // Port mapping between the exposed port (container) and the host RestartPolicy RestartPolicy // Restart policy to be used for the container AutoRemove bool // Automatically remove container when it exits VolumeDriver string // Name of the volume driver used to mount volumes @@ -436,10 +430,10 @@ type HostConfig struct { Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + CapAdd []string // List of kernel capabilities to add to the container + CapDrop []string // List of kernel capabilities to remove from the container CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup + DNS []netip.Addr `json:"Dns"` // List of DNS server to lookup DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go rename to vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go index cdee49ea3..326a5da7e 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go @@ -1,8 +1,8 @@ //go:build !windows -package container // import "github.com/docker/docker/api/types/container" +package container -import "github.com/docker/docker/api/types/network" +import "github.com/moby/moby/api/types/network" // IsValid indicates if an isolation technology is valid func (i Isolation) 
IsValid() bool { diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go similarity index 90% rename from vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go rename to vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go index f08545542..977a37602 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go @@ -1,6 +1,6 @@ -package container // import "github.com/docker/docker/api/types/container" +package container -import "github.com/docker/docker/api/types/network" +import "github.com/moby/moby/api/types/network" // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { diff --git a/vendor/github.com/moby/moby/api/types/container/network_settings.go b/vendor/github.com/moby/moby/api/types/container/network_settings.go new file mode 100644 index 000000000..c51c0839d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/network_settings.go @@ -0,0 +1,22 @@ +package container + +import ( + "github.com/moby/moby/api/types/network" +) + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + + // Ports is a collection of [network.PortBinding] indexed by [network.Port] + Ports network.PortMap + + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsSummary provides a summary of container's networks +// in /containers/json +type NetworkSettingsSummary struct { + Networks map[string]*network.EndpointSettings +} diff --git a/vendor/github.com/docker/docker/api/types/container/port.go b/vendor/github.com/moby/moby/api/types/container/port_summary.go similarity index 56% rename from vendor/github.com/docker/docker/api/types/container/port.go rename to vendor/github.com/moby/moby/api/types/container/port_summary.go index 895043cfe..68148eece 100644 --- a/vendor/github.com/docker/docker/api/types/container/port.go +++ b/vendor/github.com/moby/moby/api/types/container/port_summary.go @@ -1,14 +1,23 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// Port An open port on a container -// swagger:model Port -type Port struct { +import ( + "net/netip" +) + +// PortSummary Describes a port-mapping between the container and the host. 
+// +// Example: {"PrivatePort":8080,"PublicPort":80,"Type":"tcp"} +// +// swagger:model PortSummary +type PortSummary struct { // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` + IP netip.Addr `json:"IP,omitempty"` // Port on the container // Required: true @@ -19,5 +28,6 @@ type Port struct { // type // Required: true + // Enum: ["tcp","udp","sctp"] Type string `json:"Type"` } diff --git a/vendor/github.com/docker/docker/api/types/container/state.go b/vendor/github.com/moby/moby/api/types/container/state.go similarity index 59% rename from vendor/github.com/docker/docker/api/types/container/state.go rename to vendor/github.com/moby/moby/api/types/container/state.go index 78d5c4fe8..47c6d1249 100644 --- a/vendor/github.com/docker/docker/api/types/container/state.go +++ b/vendor/github.com/moby/moby/api/types/container/state.go @@ -6,9 +6,7 @@ import ( ) // ContainerState is a string representation of the container's current state. -// -// It currently is an alias for string, but may become a distinct type in the future. -type ContainerState = string +type ContainerState string const ( StateCreated ContainerState = "created" // StateCreated indicates the container is created, but not (yet) started. @@ -20,8 +18,14 @@ const ( StateDead ContainerState = "dead" // StateDead indicates that the container failed to be deleted. Containers in this state are attempted to be cleaned up when the daemon restarts. ) -var validStates = []ContainerState{ - StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead, +var validStates = []string{ + string(StateCreated), + string(StateRunning), + string(StatePaused), + string(StateRestarting), + string(StateRemoving), + string(StateExited), + string(StateDead), } // ValidateContainerState checks if the provided string is a valid @@ -34,31 +38,3 @@ func ValidateContainerState(s ContainerState) error { return errInvalidParameter{error: fmt.Errorf("invalid value for state (%s): must be one of %s", s, strings.Join(validStates, ", "))} } } - -// StateStatus is used to return container wait results. -// Implements exec.ExitCode interface. -// This type is needed as State include a sync.Mutex field which make -// copying it unsafe. -type StateStatus struct { - exitCode int - err error -} - -// ExitCode returns current exitcode for the state. -func (s StateStatus) ExitCode() int { - return s.exitCode -} - -// Err returns current error for the state. Returns nil if the container had -// exited on its own. -func (s StateStatus) Err() error { - return s.err -} - -// NewStateStatus returns a new StateStatus with the given exit code and error. -func NewStateStatus(exitCode int, err error) StateStatus { - return StateStatus{ - exitCode: exitCode, - err: err, - } -} diff --git a/vendor/github.com/docker/docker/api/types/container/stats.go b/vendor/github.com/moby/moby/api/types/container/stats.go similarity index 67% rename from vendor/github.com/docker/docker/api/types/container/stats.go rename to vendor/github.com/moby/moby/api/types/container/stats.go index 3bfeb4849..6a34f6ab7 100644 --- a/vendor/github.com/docker/docker/api/types/container/stats.go +++ b/vendor/github.com/moby/moby/api/types/container/stats.go @@ -147,31 +147,78 @@ type PidsStats struct { Limit uint64 `json:"limit,omitempty"` } -// Stats is Ultimate struct aggregating all types of stats of one container -// -// Deprecated: use [StatsResponse] instead. This type will be removed in the next release. 
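Because HealthStatus and ContainerState are now distinct string types rather than aliases, callers must convert raw strings explicitly and can validate them. A minimal sketch using only identifiers visible in the hunks above:

package main

import (
	"fmt"

	"github.com/moby/moby/api/types/container"
)

func main() {
	// Raw strings (e.g. from an API filter) need an explicit conversion now
	// that ContainerState is no longer an alias for string.
	s := container.ContainerState("running")
	if err := container.ValidateContainerState(s); err != nil {
		fmt.Println("invalid state:", err)
		return
	}
	switch s {
	case container.StateRunning, container.StatePaused:
		fmt.Println("container is alive")
	case container.StateExited, container.StateDead:
		fmt.Println("container has stopped")
	default:
		fmt.Println("container is transitioning")
	}
}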
-type Stats = StatsResponse - // StatsResponse aggregates all types of stats of one container. type StatsResponse struct { - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` + // ID is the ID of the container for which the stats were collected. + ID string `json:"id,omitempty"` - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` + // Name is the name of the container for which the stats were collected. + Name string `json:"name,omitempty"` - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` + // OSType is the OS of the container ("linux" or "windows") to allow + // platform-specific handling of stats. + OSType string `json:"os_type,omitempty"` + + // Read is the date and time at which this sample was collected. + Read time.Time `json:"read"` + + // CPUStats contains CPU related info of the container. + CPUStats CPUStats `json:"cpu_stats,omitempty"` + + // MemoryStats aggregates all memory stats since container inception on Linux. + // Windows returns stats for commit and private working set only. + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + + // Networks contains Nntwork statistics for the container per interface. + // + // This field is omitted if the container has no networking enabled. + Networks map[string]NetworkStats `json:"networks,omitempty"` + + // ------------------------------------------------------------------------- + // Linux-specific stats, not populated on Windows. + // ------------------------------------------------------------------------- + + // PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + // + // This field is Linux-specific and omitted for Windows containers. + PidsStats PidsStats `json:"pids_stats,omitempty"` + + // BlkioStats stores all IO service stats for data read and write. + // + // This type is Linux-specific and holds many fields that are specific + // to cgroups v1. + // + // On a cgroup v2 host, all fields other than "io_service_bytes_recursive" + // are omitted or "null". + // + // This type is only populated on Linux and omitted for Windows containers. BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` + // ------------------------------------------------------------------------- + // Windows-specific stats, not populated on Linux. + // ------------------------------------------------------------------------- + + // NumProcs is the number of processors on the system. + // + // This field is Windows-specific and always zero for Linux containers. + NumProcs uint32 `json:"num_procs"` + + // StorageStats is the disk I/O stats for read/write on Windows. + // + // This type is Windows-specific and omitted for Linux containers. StorageStats StorageStats `json:"storage_stats,omitempty"` - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - Networks map[string]NetworkStats `json:"networks,omitempty"` + // ------------------------------------------------------------------------- + // PreRead and PreCPUStats contain the previous sample of stats for + // the container, and can be used to perform delta-calculation. + // ------------------------------------------------------------------------- + + // PreRead is the date and time at which this first sample was collected. 
+ // This field is not propagated if the "one-shot" option is set. If the + // "one-shot" option is set, this field may be omitted, empty, or set + // to a default date (`0001-01-01T00:00:00Z`). + PreRead time.Time `json:"preread"` + + // PreCPUStats contains the CPUStats of the previous sample. + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/container/top_response.go b/vendor/github.com/moby/moby/api/types/container/top_response.go similarity index 63% rename from vendor/github.com/docker/docker/api/types/container/top_response.go rename to vendor/github.com/moby/moby/api/types/container/top_response.go index b4bae5ef0..966603617 100644 --- a/vendor/github.com/docker/docker/api/types/container/top_response.go +++ b/vendor/github.com/moby/moby/api/types/container/top_response.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. @@ -6,13 +8,16 @@ package container // TopResponse ContainerTopResponse // // Container "top" response. +// // swagger:model TopResponse type TopResponse struct { // Each process running in the container, where each process // is an array of values corresponding to the titles. + // Example: {"Processes":[["root","13642","882","0","17:03","pts/0","00:00:00","/bin/bash"],["root","13735","13642","0","17:06","pts/0","00:00:00","sleep 10"]]} Processes [][]string `json:"Processes"` // The ps column titles + // Example: {"Titles":["UID","PID","PPID","C","STIME","TTY","TIME","CMD"]} Titles []string `json:"Titles"` } diff --git a/vendor/github.com/docker/docker/api/types/container/update_response.go b/vendor/github.com/moby/moby/api/types/container/update_response.go similarity index 76% rename from vendor/github.com/docker/docker/api/types/container/update_response.go rename to vendor/github.com/moby/moby/api/types/container/update_response.go index e2b5bf5ac..2f7263b14 100644 --- a/vendor/github.com/docker/docker/api/types/container/update_response.go +++ b/vendor/github.com/moby/moby/api/types/container/update_response.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. @@ -6,9 +8,11 @@ package container // UpdateResponse ContainerUpdateResponse // // Response for a successful container-update. +// // swagger:model UpdateResponse type UpdateResponse struct { // Warnings encountered when updating the container. + // Example: ["Published ports are discarded when using host network mode"] Warnings []string `json:"Warnings"` } diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go similarity index 86% rename from vendor/github.com/docker/docker/api/types/container/wait_exit_error.go rename to vendor/github.com/moby/moby/api/types/container/wait_exit_error.go index ab56d4eed..96a7770c3 100644 --- a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go +++ b/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go @@ -1,9 +1,12 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. 
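The PreRead/PreCPUStats fields are documented above as the previous sample used for delta calculations; the sketch below shows the usual CPU-percentage delta. The nested CPUStats field names (CPUUsage.TotalUsage, SystemUsage, OnlineCPUs) are assumptions taken from the rest of the stats types, which are outside this hunk.

package main

import (
	"fmt"

	"github.com/moby/moby/api/types/container"
)

// cpuPercent computes CPU usage between the previous and current sample of a
// StatsResponse. Field names inside CPUStats are assumptions (not shown in
// this hunk).
func cpuPercent(s container.StatsResponse) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	cpus := float64(s.CPUStats.OnlineCPUs)
	if cpus == 0 {
		cpus = 1
	}
	return cpuDelta / sysDelta * cpus * 100
}

func main() {
	var sample container.StatsResponse // would normally be decoded from GET /containers/{id}/stats
	fmt.Printf("cpu: %.2f%%\n", cpuPercent(sample))
}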
// Editing this file might prove futile when you re-run the swagger generate command // WaitExitError container waiting error, if any +// // swagger:model WaitExitError type WaitExitError struct { diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/moby/moby/api/types/container/wait_response.go similarity index 80% rename from vendor/github.com/docker/docker/api/types/container/wait_response.go rename to vendor/github.com/moby/moby/api/types/container/wait_response.go index 84fc6afdd..68d3c3872 100644 --- a/vendor/github.com/docker/docker/api/types/container/wait_response.go +++ b/vendor/github.com/moby/moby/api/types/container/wait_response.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package container // This file was generated by the swagger tool. @@ -5,7 +7,8 @@ package container // WaitResponse ContainerWaitResponse // -// OK response to ContainerWait operation +// # OK response to ContainerWait operation +// // swagger:model WaitResponse type WaitResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/moby/moby/api/types/container/waitcondition.go similarity index 91% rename from vendor/github.com/docker/docker/api/types/container/waitcondition.go rename to vendor/github.com/moby/moby/api/types/container/waitcondition.go index cd8311f99..64820fe35 100644 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ b/vendor/github.com/moby/moby/api/types/container/waitcondition.go @@ -1,4 +1,4 @@ -package container // import "github.com/docker/docker/api/types/container" +package container // WaitCondition is a type used to specify a container state for which // to wait. diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/moby/moby/api/types/events/events.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/events/events.go rename to vendor/github.com/moby/moby/api/types/events/events.go index e225df4ec..b8393addd 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/moby/moby/api/types/events/events.go @@ -1,5 +1,4 @@ -package events // import "github.com/docker/docker/api/types/events" -import "github.com/docker/docker/api/types/filters" +package events // Type is used for event-types. type Type string @@ -111,12 +110,6 @@ type Actor struct { // Message represents the information an event contains type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` // Deprecated: use Action instead. - ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. - From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. - Type Type Action Action Actor Actor @@ -126,10 +119,3 @@ type Message struct { Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` } - -// ListOptions holds parameters to filter events with. 
-type ListOptions struct { - Since string - Until string - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/image/delete_response.go b/vendor/github.com/moby/moby/api/types/image/delete_response.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/image/delete_response.go rename to vendor/github.com/moby/moby/api/types/image/delete_response.go index 998620dc6..b19119a38 100644 --- a/vendor/github.com/docker/docker/api/types/image/delete_response.go +++ b/vendor/github.com/moby/moby/api/types/image/delete_response.go @@ -1,9 +1,12 @@ +// Code generated by go-swagger; DO NOT EDIT. + package image // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // DeleteResponse delete response +// // swagger:model DeleteResponse type DeleteResponse struct { diff --git a/vendor/github.com/moby/moby/api/types/image/disk_usage.go b/vendor/github.com/moby/moby/api/types/image/disk_usage.go new file mode 100644 index 000000000..7297813c1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package image + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for image resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active images. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of image summaries. + // + Items []Summary `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing unused images. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all images. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by images. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/image.go b/vendor/github.com/moby/moby/api/types/image/image.go new file mode 100644 index 000000000..1c8990ae9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/image.go @@ -0,0 +1,18 @@ +package image + +import ( + "time" +) + +// Metadata contains engine-local data about the image. +type Metadata struct { + // LastTagTime is the date and time at which the image was last tagged. + LastTagTime time.Time `json:",omitempty"` +} + +// PruneReport contains the response for Engine API: +// POST "/images/prune" +type PruneReport struct { + ImagesDeleted []DeleteResponse + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/moby/moby/api/types/image/image_history.go similarity index 84% rename from vendor/github.com/docker/docker/api/types/image/image_history.go rename to vendor/github.com/moby/moby/api/types/image/image_history.go index e302bb0ae..648ba779e 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/vendor/github.com/moby/moby/api/types/image/image_history.go @@ -1,12 +1,15 @@ -package image // import "github.com/docker/docker/api/types/image" +// Code generated by go-swagger; DO NOT EDIT. + +package image // ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. +// Code generated by `swagger generate operation`. 
// // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- // HistoryResponseItem individual image layer information in response to ImageHistory operation +// // swagger:model HistoryResponseItem type HistoryResponseItem struct { diff --git a/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/vendor/github.com/moby/moby/api/types/image/image_inspect.go similarity index 70% rename from vendor/github.com/docker/docker/api/types/image/image_inspect.go rename to vendor/github.com/moby/moby/api/types/image/image_inspect.go index 3bdb47428..66a277e55 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_inspect.go +++ b/vendor/github.com/moby/moby/api/types/image/image_inspect.go @@ -1,9 +1,8 @@ package image import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/storage" dockerspec "github.com/moby/docker-image-spec/specs-go/v1" + "github.com/moby/moby/api/types/storage" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -43,16 +42,9 @@ type InspectResponse struct { // the manifest is generated and its digest calculated. RepoDigests []string - // Parent is the ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - Parent string - // Comment is an optional message that can be set when committing or - // importing the image. - Comment string + // importing the image. This field is omitted if not set. + Comment string `json:",omitempty"` // Created is the date and time at which the image was created, formatted in // RFC 3339 nano-seconds (time.RFC3339Nano). @@ -61,30 +53,10 @@ type InspectResponse struct { // and omitted otherwise. Created string `json:",omitempty"` - // Container is the ID of the container that was used to create the image. - // - // Depending on how the image was created, this field may be empty. - // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - Container string `json:",omitempty"` - - // ContainerConfig is an optional field containing the configuration of the - // container that was last committed when creating the image. - // - // Previous versions of Docker builder used this field to store build cache, - // and it is not in active use anymore. - // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - ContainerConfig *container.Config `json:",omitempty"` - - // DockerVersion is the version of Docker that was used to build the image. - // - // Depending on how the image was created, this field may be empty. - DockerVersion string - // Author is the name of the author that was specified when committing the // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string + // This field is omitted if not set. + Author string `json:",omitempty"` Config *dockerspec.DockerOCIImageConfig // Architecture is the hardware CPU architecture that the image runs on. @@ -103,15 +75,9 @@ type InspectResponse struct { // Size is the total size of the image including all layers it is composed of. Size int64 - // VirtualSize is the total size of the image including all layers it is - // composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. 
- VirtualSize int64 `json:"VirtualSize,omitempty"` - // GraphDriver holds information about the storage driver used to store the // container's and image's filesystem. - GraphDriver storage.DriverData + GraphDriver *storage.DriverData `json:"GraphDriver,omitempty"` // RootFS contains information about the image's RootFS, including the // layer IDs. diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/moby/moby/api/types/image/manifest.go similarity index 100% rename from vendor/github.com/docker/docker/api/types/image/manifest.go rename to vendor/github.com/moby/moby/api/types/image/manifest.go diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/moby/moby/api/types/image/summary.go similarity index 93% rename from vendor/github.com/docker/docker/api/types/image/summary.go rename to vendor/github.com/moby/moby/api/types/image/summary.go index c5ae6ab9c..3d4dd165a 100644 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ b/vendor/github.com/moby/moby/api/types/image/summary.go @@ -3,7 +3,6 @@ package image import ocispec "github.com/opencontainers/image-spec/specs-go/v1" type Summary struct { - // Number of containers using this image. Includes both stopped and running // containers. // @@ -93,9 +92,4 @@ type Summary struct { // // Required: true Size int64 `json:"Size"` - - // Total size of the image including all layers it is composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - VirtualSize int64 `json:"VirtualSize,omitempty"` } diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go b/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go new file mode 100644 index 000000000..632b25fdf --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go @@ -0,0 +1,12 @@ +package jsonstream + +// Error wraps a concrete Code and Message, Code is +// an integer error code, Message is the error message. +type Error struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *Error) Error() string { + return e.Message +} diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/message.go b/vendor/github.com/moby/moby/api/types/jsonstream/message.go new file mode 100644 index 000000000..2e1346d41 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/jsonstream/message.go @@ -0,0 +1,15 @@ +package jsonstream + +import "encoding/json" + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type Message struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *Progress `json:"progressDetail,omitempty"` + ID string `json:"id,omitempty"` + Error *Error `json:"errorDetail,omitempty"` + Aux *json.RawMessage `json:"aux,omitempty"` // Aux contains out-of-band data, such as digests for push signing and image id after building. +} diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/progress.go b/vendor/github.com/moby/moby/api/types/jsonstream/progress.go new file mode 100644 index 000000000..5c38b3b5e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/jsonstream/progress.go @@ -0,0 +1,10 @@ +package jsonstream + +// Progress describes a progress message in a JSON stream. 
+type Progress struct { + Current int64 `json:"current,omitempty"` // Current is the current status and value of the progress made towards Total. + Total int64 `json:"total,omitempty"` // Total is the end value describing when we made 100% progress for an operation. + Start int64 `json:"start,omitempty"` // Start is the initial value for the operation. + HideCounts bool `json:"hidecounts,omitempty"` // HideCounts. if true, hides the progress count indicator (xB/yB). + Units string `json:"units,omitempty"` // Units is the unit to print for progress. It defaults to "bytes" if empty. +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/moby/moby/api/types/mount/mount.go similarity index 98% rename from vendor/github.com/docker/docker/api/types/mount/mount.go rename to vendor/github.com/moby/moby/api/types/mount/mount.go index d98dbec99..090d436c6 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/moby/moby/api/types/mount/mount.go @@ -1,4 +1,4 @@ -package mount // import "github.com/docker/docker/api/types/mount" +package mount import ( "os" diff --git a/vendor/github.com/moby/moby/api/types/network/config_reference.go b/vendor/github.com/moby/moby/api/types/network/config_reference.go new file mode 100644 index 000000000..1158afe65 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/config_reference.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ConfigReference The config-only network source to provide the configuration for +// this network. +// +// swagger:model ConfigReference +type ConfigReference struct { + + // The name of the config-only network that provides the network's + // configuration. The specified network must be an existing config-only + // network. Only network names are allowed, not network IDs. + // + // Example: config_only_network_01 + Network string `json:"Network"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/connect_request.go b/vendor/github.com/moby/moby/api/types/network/connect_request.go new file mode 100644 index 000000000..2ff14d360 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/connect_request.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ConnectRequest NetworkConnectRequest represents the data to be used to connect a container to a network. +// +// swagger:model ConnectRequest +type ConnectRequest struct { + + // The ID or name of the container to connect to the network. 
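The new jsonstream package models the progress stream returned by endpoints such as image pull and build. A self-contained sketch that decodes such a stream; the sample payload is illustrative only.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/moby/moby/api/types/jsonstream"
)

// drainStream decodes a stream of jsonstream.Message values, printing
// progress and surfacing the first error message, if any.
func drainStream(r io.Reader) error {
	dec := json.NewDecoder(r)
	for {
		var msg jsonstream.Message
		if err := dec.Decode(&msg); err != nil {
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
		if msg.Error != nil {
			return msg.Error // *jsonstream.Error implements error
		}
		if msg.Progress != nil {
			fmt.Printf("%s: %s %d/%d\n", msg.ID, msg.Status, msg.Progress.Current, msg.Progress.Total)
			continue
		}
		if msg.Status != "" {
			fmt.Println(msg.Status)
		}
	}
}

func main() {
	// Illustrative payload shaped like an image-pull progress stream.
	body := `{"status":"Pulling from library/alpine","id":"latest"}
{"status":"Downloading","id":"abc123","progressDetail":{"current":512,"total":1024}}`
	if err := drainStream(strings.NewReader(body)); err != nil {
		fmt.Println("stream error:", err)
	}
}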
+ // Example: 3613f73ba0e4 + // Required: true + Container string `json:"Container"` + + // endpoint config + EndpointConfig *EndpointSettings `json:"EndpointConfig,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/network/create_response.go b/vendor/github.com/moby/moby/api/types/network/create_response.go similarity index 71% rename from vendor/github.com/docker/docker/api/types/network/create_response.go rename to vendor/github.com/moby/moby/api/types/network/create_response.go index c32b35bff..199705991 100644 --- a/vendor/github.com/docker/docker/api/types/network/create_response.go +++ b/vendor/github.com/moby/moby/api/types/network/create_response.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package network // This file was generated by the swagger tool. @@ -5,11 +7,13 @@ package network // CreateResponse NetworkCreateResponse // -// OK response to NetworkCreate operation +// # OK response to NetworkCreate operation +// // swagger:model CreateResponse type CreateResponse struct { // The ID of the created network. + // Example: b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d // Required: true ID string `json:"Id"` diff --git a/vendor/github.com/moby/moby/api/types/network/disconnect_request.go b/vendor/github.com/moby/moby/api/types/network/disconnect_request.go new file mode 100644 index 000000000..7b1f521e7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/disconnect_request.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DisconnectRequest NetworkDisconnectRequest represents the data to be used to disconnect a container from a network. +// +// swagger:model DisconnectRequest +type DisconnectRequest struct { + + // The ID or name of the container to disconnect from the network. + // Example: 3613f73ba0e4 + // Required: true + Container string `json:"Container"` + + // Force the container to disconnect from the network. + // Example: false + Force bool `json:"Force"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/endpoint.go b/vendor/github.com/moby/moby/api/types/network/endpoint.go new file mode 100644 index 000000000..cd39c579f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/endpoint.go @@ -0,0 +1,74 @@ +package network + +import ( + "maps" + "net/netip" + "slices" +) + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configuration data + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint. + DriverOpts map[string]string + + // GwPriority determines which endpoint will provide the default gateway + // for the container. The endpoint with the highest priority will be used. + // If multiple endpoints have the same priority, they are lexicographically + // sorted based on their network name, and the one that sorts first is picked. + GwPriority int + + // Operational data + + NetworkID string + EndpointID string + Gateway netip.Addr + IPAddress netip.Addr + + // MacAddress may be used to specify a MAC address when the container is created. + // Once the container is running, it becomes operational data (it may contain a + // generated address). 
+ MacAddress HardwareAddr + IPPrefixLen int + IPv6Gateway netip.Addr + GlobalIPv6Address netip.Addr + GlobalIPv6PrefixLen int + // DNSNames holds all the (non fully qualified) DNS names associated to this + // endpoint. The first entry is used to generate PTR records. + DNSNames []string +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + if es == nil { + return nil + } + + epCopy := *es + epCopy.IPAMConfig = es.IPAMConfig.Copy() + epCopy.Links = slices.Clone(es.Links) + epCopy.Aliases = slices.Clone(es.Aliases) + epCopy.DNSNames = slices.Clone(es.DNSNames) + epCopy.DriverOpts = maps.Clone(es.DriverOpts) + + return &epCopy +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address netip.Addr `json:",omitempty"` + IPv6Address netip.Addr `json:",omitempty"` + LinkLocalIPs []netip.Addr `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + if cfg == nil { + return nil + } + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = slices.Clone(cfg.LinkLocalIPs) + return &cfgCopy +} diff --git a/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go b/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go new file mode 100644 index 000000000..bf493ad5d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go @@ -0,0 +1,35 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// EndpointResource contains network resources allocated and used for a container in a network. +// +// swagger:model EndpointResource +type EndpointResource struct { + + // name + // Example: container_1 + Name string `json:"Name"` + + // endpoint ID + // Example: 628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a + EndpointID string `json:"EndpointID"` + + // mac address + // Example: 02:42:ac:13:00:02 + MacAddress HardwareAddr `json:"MacAddress"` + + // IPv4 address + // Example: 172.19.0.2/16 + IPv4Address netip.Prefix `json:"IPv4Address"` + + // IPv6 address + IPv6Address netip.Prefix `json:"IPv6Address"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/hwaddr.go b/vendor/github.com/moby/moby/api/types/network/hwaddr.go new file mode 100644 index 000000000..9d0c2b4b6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/hwaddr.go @@ -0,0 +1,37 @@ +package network + +import ( + "encoding" + "fmt" + "net" +) + +// A HardwareAddr represents a physical hardware address. +// It implements [encoding.TextMarshaler] and [encoding.TextUnmarshaler] +// in the absence of go.dev/issue/29678. 
+type HardwareAddr net.HardwareAddr + +var _ encoding.TextMarshaler = (HardwareAddr)(nil) +var _ encoding.TextUnmarshaler = (*HardwareAddr)(nil) +var _ fmt.Stringer = (HardwareAddr)(nil) + +func (m *HardwareAddr) UnmarshalText(text []byte) error { + if len(text) == 0 { + *m = nil + return nil + } + hw, err := net.ParseMAC(string(text)) + if err != nil { + return err + } + *m = HardwareAddr(hw) + return nil +} + +func (m HardwareAddr) MarshalText() ([]byte, error) { + return []byte(net.HardwareAddr(m).String()), nil +} + +func (m HardwareAddr) String() string { + return net.HardwareAddr(m).String() +} diff --git a/vendor/github.com/moby/moby/api/types/network/inspect.go b/vendor/github.com/moby/moby/api/types/network/inspect.go new file mode 100644 index 000000000..cded5e608 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/inspect.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Inspect The body of the "get network" http response message. +// +// swagger:model Inspect +type Inspect struct { + Network + + // Contains endpoints attached to the network. + // + // Example: {"19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c":{"EndpointID":"628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a","IPv4Address":"172.19.0.2/16","IPv6Address":"","MacAddress":"02:42:ac:13:00:02","Name":"test"}} + Containers map[string]EndpointResource `json:"Containers"` + + // List of services using the network. This field is only present for + // swarm scope networks, and omitted for local scope networks. + // + Services map[string]ServiceInfo `json:"Services,omitempty"` + + // provides runtime information about the network such as the number of allocated IPs. + // + Status *Status `json:"Status,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/ipam.go b/vendor/github.com/moby/moby/api/types/network/ipam.go new file mode 100644 index 000000000..e57be481b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/ipam.go @@ -0,0 +1,22 @@ +package network + +import ( + "net/netip" +) + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet netip.Prefix `json:",omitempty"` + IPRange netip.Prefix `json:",omitempty"` + Gateway netip.Addr `json:",omitempty"` + AuxAddress map[string]netip.Addr `json:"AuxiliaryAddresses,omitempty"` +} + +type SubnetStatuses = map[netip.Prefix]SubnetStatus diff --git a/vendor/github.com/moby/moby/api/types/network/ipam_status.go b/vendor/github.com/moby/moby/api/types/network/ipam_status.go new file mode 100644 index 000000000..7eb4e8487 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/ipam_status.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. 
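Because HardwareAddr implements encoding.TextMarshaler and encoding.TextUnmarshaler, MAC addresses round-trip through JSON as plain strings. A small sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/moby/api/types/network"
)

func main() {
	var mac network.HardwareAddr
	if err := mac.UnmarshalText([]byte("02:42:ac:13:00:02")); err != nil {
		panic(err)
	}

	// encoding/json picks up MarshalText, so the address is emitted as a string.
	out, err := json.Marshal(struct {
		MacAddress network.HardwareAddr
	}{MacAddress: mac})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"MacAddress":"02:42:ac:13:00:02"}
}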
+// Editing this file might prove futile when you re-run the swagger generate command + +// IPAMStatus IPAM status +// +// swagger:model IPAMStatus +type IPAMStatus struct { + + // subnets + // Example: {"172.16.0.0/16":{"DynamicIPsAvailable":65533,"IPsInUse":3},"2001:db8:abcd:0012::0/96":{"DynamicIPsAvailable":4294967291,"IPsInUse":5}} + Subnets SubnetStatuses `json:"Subnets,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/network.go b/vendor/github.com/moby/moby/api/types/network/network.go new file mode 100644 index 000000000..a7d9c0f6a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/network.go @@ -0,0 +1,100 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + timeext "time" +) + +// Network network +// +// swagger:model Network +type Network struct { + + // Name of the network. + // + // Example: my_network + Name string `json:"Name"` + + // ID that uniquely identifies a network on a single machine. + // + // Example: 7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 + ID string `json:"Id"` + + // Date and time at which the network was created in + // [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + // + // Example: 2016-10-19T04:33:30.360899459Z + Created timeext.Time `json:"Created"` + + // The level at which the network exists (e.g. `swarm` for cluster-wide + // or `local` for machine level) + // + // Example: local + Scope string `json:"Scope"` + + // The name of the driver used to create the network (e.g. `bridge`, + // `overlay`). + // + // Example: overlay + Driver string `json:"Driver"` + + // Whether the network was created with IPv4 enabled. + // + // Example: true + EnableIPv4 bool `json:"EnableIPv4"` + + // Whether the network was created with IPv6 enabled. + // + // Example: false + EnableIPv6 bool `json:"EnableIPv6"` + + // The network's IP Address Management. + // + IPAM IPAM `json:"IPAM"` + + // Whether the network is created to only allow internal networking + // connectivity. + // + // Example: false + Internal bool `json:"Internal"` + + // Whether a global / swarm scope network is manually attachable by regular + // containers from workers in swarm mode. + // + // Example: false + Attachable bool `json:"Attachable"` + + // Whether the network is providing the routing-mesh for the swarm cluster. + // + // Example: false + Ingress bool `json:"Ingress"` + + // config from + ConfigFrom ConfigReference `json:"ConfigFrom"` + + // Whether the network is a config-only network. Config-only networks are + // placeholder networks for network configurations to be used by other + // networks. Config-only networks cannot be used directly to run containers + // or services. + // + ConfigOnly bool `json:"ConfigOnly"` + + // Network-specific options uses when creating the network. + // + // Example: {"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"} + Options map[string]string `json:"Options"` + + // Metadata specific to the network being created. 
+ // + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} + Labels map[string]string `json:"Labels"` + + // List of peer nodes for an overlay network. This field is only present + // for overlay networks, and omitted for other network types. + // + Peers []PeerInfo `json:"Peers,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/network_types.go b/vendor/github.com/moby/moby/api/types/network/network_types.go new file mode 100644 index 000000000..5401f55f8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/network_types.go @@ -0,0 +1,43 @@ +package network + +const ( + // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack. + NetworkDefault = "default" + // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux) + NetworkHost = "host" + // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows) + NetworkNone = "none" + // NetworkBridge is the name of the default network on Linux + NetworkBridge = "bridge" + // NetworkNat is the name of the default network on Windows + NetworkNat = "nat" +) + +// CreateRequest is the request message sent to the server for network create call. +type CreateRequest struct { + Name string // Name is the requested name of the network. + Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv4 *bool `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4. + EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. + IPAM *IPAM // IPAM is the network's IP Address Management. + Internal bool // Internal represents if the network is used internal only. + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. + Options map[string]string // Options specifies the network-specific options to use for when creating the network. + Labels map[string]string // Labels holds metadata specific to the network being created. 
+} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// PruneReport contains the response for Engine API: +// POST "/networks/prune" +type PruneReport struct { + NetworksDeleted []string +} diff --git a/vendor/github.com/moby/moby/api/types/network/peer_info.go b/vendor/github.com/moby/moby/api/types/network/peer_info.go new file mode 100644 index 000000000..dc88ec16f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/peer_info.go @@ -0,0 +1,24 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// PeerInfo represents one peer of an overlay network. +// +// swagger:model PeerInfo +type PeerInfo struct { + + // ID of the peer-node in the Swarm cluster. + // Example: 6869d7c1732b + Name string `json:"Name"` + + // IP-address of the peer-node in the Swarm cluster. + // Example: 10.133.77.91 + IP netip.Addr `json:"IP"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/port.go b/vendor/github.com/moby/moby/api/types/network/port.go new file mode 100644 index 000000000..171d9f51d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/port.go @@ -0,0 +1,346 @@ +package network + +import ( + "errors" + "fmt" + "iter" + "net/netip" + "strconv" + "strings" + "unique" +) + +// IPProtocol represents a network protocol for a port. +type IPProtocol string + +const ( + TCP IPProtocol = "tcp" + UDP IPProtocol = "udp" + SCTP IPProtocol = "sctp" +) + +// Sentinel port proto value for zero Port and PortRange values. +var protoZero unique.Handle[IPProtocol] + +// Port is a type representing a single port number and protocol in the format "/[]". +// +// The zero port value, i.e. Port{}, is invalid; use [ParsePort] to create a valid Port value. +type Port struct { + num uint16 + proto unique.Handle[IPProtocol] +} + +// ParsePort parses s as a [Port]. +// +// It normalizes the provided protocol such that "80/tcp", "80/TCP", and "80/tCp" are equivalent. +// If a port number is provided, but no protocol, the default ("tcp") protocol is returned. +func ParsePort(s string) (Port, error) { + if s == "" { + return Port{}, errors.New("invalid port: value is empty") + } + + port, proto, _ := strings.Cut(s, "/") + + portNum, err := parsePortNumber(port) + if err != nil { + return Port{}, fmt.Errorf("invalid port '%s': %w", port, err) + } + + normalizedPortProto := normalizePortProto(proto) + return Port{num: portNum, proto: normalizedPortProto}, nil +} + +// MustParsePort calls [ParsePort](s) and panics on error. +// +// It is intended for use in tests with hard-coded strings. +func MustParsePort(s string) Port { + p, err := ParsePort(s) + if err != nil { + panic(err) + } + return p +} + +// PortFrom returns a [Port] with the given number and protocol. +// +// If no protocol is specified (i.e. proto == ""), then PortFrom returns Port{}, false. +func PortFrom(num uint16, proto IPProtocol) (p Port, ok bool) { + if proto == "" { + return Port{}, false + } + normalized := normalizePortProto(string(proto)) + return Port{num: num, proto: normalized}, true +} + +// Num returns p's port number. 
+func (p Port) Num() uint16 { + return p.num +} + +// Proto returns p's network protocol. +func (p Port) Proto() IPProtocol { + return p.proto.Value() +} + +// IsZero reports whether p is the zero value. +func (p Port) IsZero() bool { + return p.proto == protoZero +} + +// IsValid reports whether p is an initialized valid port (not the zero value). +func (p Port) IsValid() bool { + return p.proto != protoZero +} + +// String returns a string representation of the port in the format "/". +// If the port is the zero value, it returns "invalid port". +func (p Port) String() string { + switch p.proto { + case protoZero: + return "invalid port" + default: + return string(p.AppendTo(nil)) + } +} + +// AppendText implements [encoding.TextAppender] interface. +// It is the same as [Port.AppendTo] but returns an error to satisfy the interface. +func (p Port) AppendText(b []byte) ([]byte, error) { + return p.AppendTo(b), nil +} + +// AppendTo appends a text encoding of p to b and returns the extended buffer. +func (p Port) AppendTo(b []byte) []byte { + if p.IsZero() { + return b + } + return fmt.Appendf(b, "%d/%s", p.num, p.proto.Value()) +} + +// MarshalText implements [encoding.TextMarshaler] interface. +func (p Port) MarshalText() ([]byte, error) { + return p.AppendText(nil) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] interface. +func (p *Port) UnmarshalText(text []byte) error { + if len(text) == 0 { + *p = Port{} + return nil + } + + port, err := ParsePort(string(text)) + if err != nil { + return err + } + + *p = port + return nil +} + +// Range returns a [PortRange] representing the single port. +func (p Port) Range() PortRange { + return PortRange{start: p.num, end: p.num, proto: p.proto} +} + +// PortSet is a collection of structs indexed by [Port]. +type PortSet = map[Port]struct{} + +// PortBinding represents a binding between a Host IP address and a Host Port. +type PortBinding struct { + // HostIP is the host IP Address + HostIP netip.Addr `json:"HostIp"` + // HostPort is the host port number + HostPort string `json:"HostPort"` +} + +// PortMap is a collection of [PortBinding] indexed by [Port]. +type PortMap = map[Port][]PortBinding + +// PortRange represents a range of port numbers and a protocol in the format "8000-9000/tcp". +// +// The zero port range value, i.e. PortRange{}, is invalid; use [ParsePortRange] to create a valid PortRange value. +type PortRange struct { + start uint16 + end uint16 + proto unique.Handle[IPProtocol] +} + +// ParsePortRange parses s as a [PortRange]. +// +// It normalizes the provided protocol such that "80-90/tcp", "80-90/TCP", and "80-90/tCp" are equivalent. +// If a port number range is provided, but no protocol, the default ("tcp") protocol is returned. 
+func ParsePortRange(s string) (PortRange, error) { + if s == "" { + return PortRange{}, errors.New("invalid port range: value is empty") + } + + portRange, proto, _ := strings.Cut(s, "/") + + start, end, ok := strings.Cut(portRange, "-") + startVal, err := parsePortNumber(start) + if err != nil { + return PortRange{}, fmt.Errorf("invalid start port '%s': %w", start, err) + } + + portProto := normalizePortProto(proto) + + if !ok || start == end { + return PortRange{start: startVal, end: startVal, proto: portProto}, nil + } + + endVal, err := parsePortNumber(end) + if err != nil { + return PortRange{}, fmt.Errorf("invalid end port '%s': %w", end, err) + } + if endVal < startVal { + return PortRange{}, errors.New("invalid port range: " + s) + } + return PortRange{start: startVal, end: endVal, proto: portProto}, nil +} + +// MustParsePortRange calls [ParsePortRange](s) and panics on error. +// It is intended for use in tests with hard-coded strings. +func MustParsePortRange(s string) PortRange { + pr, err := ParsePortRange(s) + if err != nil { + panic(err) + } + return pr +} + +// PortRangeFrom returns a [PortRange] with the given start and end port numbers and protocol. +// +// If end < start or no protocol is specified (i.e. proto == ""), then PortRangeFrom returns PortRange{}, false. +func PortRangeFrom(start, end uint16, proto IPProtocol) (pr PortRange, ok bool) { + if end < start || proto == "" { + return PortRange{}, false + } + normalized := normalizePortProto(string(proto)) + return PortRange{start: start, end: end, proto: normalized}, true +} + +// Start returns pr's start port number. +func (pr PortRange) Start() uint16 { + return pr.start +} + +// End returns pr's end port number. +func (pr PortRange) End() uint16 { + return pr.end +} + +// Proto returns pr's network protocol. +func (pr PortRange) Proto() IPProtocol { + return pr.proto.Value() +} + +// IsZero reports whether pr is the zero value. +func (pr PortRange) IsZero() bool { + return pr.proto == protoZero +} + +// IsValid reports whether pr is an initialized valid port range (not the zero value). +func (pr PortRange) IsValid() bool { + return pr.proto != protoZero +} + +// String returns a string representation of the port range in the format "-/" or "/" if start == end. +// If the port range is the zero value, it returns "invalid port range". +func (pr PortRange) String() string { + switch pr.proto { + case protoZero: + return "invalid port range" + default: + return string(pr.AppendTo(nil)) + } +} + +// AppendText implements [encoding.TextAppender] interface. +// It is the same as [PortRange.AppendTo] but returns an error to satisfy the interface. +func (pr PortRange) AppendText(b []byte) ([]byte, error) { + return pr.AppendTo(b), nil +} + +// AppendTo appends a text encoding of pr to b and returns the extended buffer. +func (pr PortRange) AppendTo(b []byte) []byte { + if pr.IsZero() { + return b + } + if pr.start == pr.end { + return fmt.Appendf(b, "%d/%s", pr.start, pr.proto.Value()) + } + return fmt.Appendf(b, "%d-%d/%s", pr.start, pr.end, pr.proto.Value()) +} + +// MarshalText implements [encoding.TextMarshaler] interface. +func (pr PortRange) MarshalText() ([]byte, error) { + return pr.AppendText(nil) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] interface. 
+func (pr *PortRange) UnmarshalText(text []byte) error { + if len(text) == 0 { + *pr = PortRange{} + return nil + } + + portRange, err := ParsePortRange(string(text)) + if err != nil { + return err + } + *pr = portRange + return nil +} + +// Range returns pr. +func (pr PortRange) Range() PortRange { + return pr +} + +// All returns an iterator over all the individual ports in the range. +// +// For example: +// +// for port := range pr.All() { +// // ... +// } +func (pr PortRange) All() iter.Seq[Port] { + return func(yield func(Port) bool) { + for i := uint32(pr.Start()); i <= uint32(pr.End()); i++ { + if !yield(Port{num: uint16(i), proto: pr.proto}) { + return + } + } + } +} + +// parsePortNumber parses rawPort into an int, unwrapping strconv errors +// and returning a single "out of range" error for any value outside 0–65535. +func parsePortNumber(rawPort string) (uint16, error) { + if rawPort == "" { + return 0, errors.New("value is empty") + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + var numErr *strconv.NumError + if errors.As(err, &numErr) { + err = numErr.Err + } + return 0, err + } + + return uint16(port), nil +} + +// normalizePortProto normalizes the protocol string such that "tcp", "TCP", and "tCp" are equivalent. +// If proto is not specified, it defaults to "tcp". +func normalizePortProto(proto string) unique.Handle[IPProtocol] { + if proto == "" { + return unique.Make(TCP) + } + + proto = strings.ToLower(proto) + + return unique.Make(IPProtocol(proto)) +} diff --git a/vendor/github.com/moby/moby/api/types/network/service_info.go b/vendor/github.com/moby/moby/api/types/network/service_info.go new file mode 100644 index 000000000..fdd92f161 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/service_info.go @@ -0,0 +1,28 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// ServiceInfo represents service parameters with the list of service's tasks +// +// swagger:model ServiceInfo +type ServiceInfo struct { + + // v IP + VIP netip.Addr `json:"VIP"` + + // ports + Ports []string `json:"Ports"` + + // local l b index + LocalLBIndex int `json:"LocalLBIndex"` + + // tasks + Tasks []Task `json:"Tasks"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/status.go b/vendor/github.com/moby/moby/api/types/network/status.go new file mode 100644 index 000000000..94f4b4b2e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/status.go @@ -0,0 +1,15 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Status provides runtime information about the network such as the number of allocated IPs. +// +// swagger:model Status +type Status struct { + + // IPAM + IPAM IPAMStatus `json:"IPAM"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/subnet_status.go b/vendor/github.com/moby/moby/api/types/network/subnet_status.go new file mode 100644 index 000000000..dd62429f5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/subnet_status.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. 
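For reference, a minimal usage sketch of the Port and PortRange helpers added in port.go above; illustrative only, and it assumes Go 1.23+ for the range-over-function iterator:

package main

import (
	"fmt"

	"github.com/moby/moby/api/types/network"
)

func main() {
	// ParsePort normalizes the protocol and defaults to "tcp" when none is given.
	p, err := network.ParsePort("8080/TCP")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Num(), p.Proto(), p) // 8080 tcp 8080/tcp

	// A range can be expanded into its individual ports via the iterator.
	pr := network.MustParsePortRange("8000-8002/udp")
	for port := range pr.All() {
		fmt.Println(port) // 8000/udp, 8001/udp, 8002/udp
	}
}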
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// SubnetStatus subnet status
+//
+// swagger:model SubnetStatus
+type SubnetStatus struct {
+
+	// Number of IP addresses in the subnet that are in use or reserved and are therefore unavailable for allocation, saturating at 2^64 - 1.
+	//
+	IPsInUse uint64 `json:"IPsInUse"`
+
+	// Number of IP addresses within the network's IPRange for the subnet that are available for allocation, saturating at 2^64 - 1.
+	//
+	DynamicIPsAvailable uint64 `json:"DynamicIPsAvailable"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/summary.go b/vendor/github.com/moby/moby/api/types/network/summary.go
new file mode 100644
index 000000000..3f50ce227
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/summary.go
@@ -0,0 +1,13 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Summary Network list response item
+//
+// swagger:model Summary
+type Summary struct {
+	Network
+}
diff --git a/vendor/github.com/moby/moby/api/types/network/task.go b/vendor/github.com/moby/moby/api/types/network/task.go
new file mode 100644
index 000000000..a547523a4
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/network/task.go
@@ -0,0 +1,28 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package network
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"net/netip"
+)
+
+// Task carries the information about one backend task
+//
+// swagger:model Task
+type Task struct {
+
+	// name
+	Name string `json:"Name"`
+
+	// endpoint ID
+	EndpointID string `json:"EndpointID"`
+
+	// endpoint IP
+	EndpointIP netip.Addr `json:"EndpointIP"`
+
+	// info
+	Info map[string]string `json:"Info"`
+}
diff --git a/vendor/github.com/moby/moby/api/types/plugin/.gitignore b/vendor/github.com/moby/moby/api/types/plugin/.gitignore
new file mode 100644
index 000000000..5cea8434d
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/plugin/.gitignore
@@ -0,0 +1 @@
+testdata/rapid/**
diff --git a/vendor/github.com/moby/moby/api/types/plugin/capability.go b/vendor/github.com/moby/moby/api/types/plugin/capability.go
new file mode 100644
index 000000000..d53f77a1f
--- /dev/null
+++ b/vendor/github.com/moby/moby/api/types/plugin/capability.go
@@ -0,0 +1,55 @@
+package plugin
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"strings"
+)
+
+type CapabilityID struct {
+	Capability string
+	Prefix string
+	Version string
+}
+
+var (
+	_ fmt.Stringer = CapabilityID{}
+	_ encoding.TextUnmarshaler = (*CapabilityID)(nil)
+	_ encoding.TextMarshaler = CapabilityID{}
+)
+
+// String implements [fmt.Stringer] for CapabilityID
+func (t CapabilityID) String() string {
+	return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler] for CapabilityID
+func (t *CapabilityID) UnmarshalText(p []byte) error {
+	fqcap, version, _ := bytes.Cut(p, []byte{'/'})
+	idx := bytes.LastIndexByte(fqcap, '.')
+	if idx < 0 {
+		t.Prefix = ""
+		t.Capability = string(fqcap)
+	} else {
+		t.Prefix = string(fqcap[:idx])
+		t.Capability = string(fqcap[idx+1:])
+	}
+	t.Version = string(version)
+	return nil
+}
+
+// MarshalText implements [encoding.TextMarshaler] for CapabilityID
+func (t CapabilityID) MarshalText() ([]byte, error) {
+	// Assert that the
value can be round-tripped successfully. + if strings.Contains(t.Capability, ".") { + return nil, fmt.Errorf("capability %q cannot contain a dot", t.Capability) + } + if strings.Contains(t.Prefix, "/") { + return nil, fmt.Errorf("prefix %q cannot contain a slash", t.Prefix) + } + if strings.Contains(t.Capability, "/") { + return nil, fmt.Errorf("capability %q cannot contain a slash", t.Capability) + } + return []byte(t.String()), nil +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/moby/moby/api/types/plugin/device.go similarity index 72% rename from vendor/github.com/docker/docker/api/types/plugin_device.go rename to vendor/github.com/moby/moby/api/types/plugin/device.go index 569901067..ae9617704 100644 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ b/vendor/github.com/moby/moby/api/types/plugin/device.go @@ -1,11 +1,14 @@ -package types +// Code generated by go-swagger; DO NOT EDIT. + +package plugin // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { +// Device device +// +// swagger:model Device +type Device struct { // description // Required: true @@ -16,6 +19,7 @@ type PluginDevice struct { Name string `json:"Name"` // path + // Example: /dev/fuse // Required: true Path *string `json:"Path"` diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/moby/moby/api/types/plugin/env.go similarity index 77% rename from vendor/github.com/docker/docker/api/types/plugin_env.go rename to vendor/github.com/moby/moby/api/types/plugin/env.go index 32962dc2e..dcbe0b762 100644 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ b/vendor/github.com/moby/moby/api/types/plugin/env.go @@ -1,11 +1,14 @@ -package types +// Code generated by go-swagger; DO NOT EDIT. + +package plugin // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { +// Env env +// +// swagger:model Env +type Env struct { // description // Required: true diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/moby/moby/api/types/plugin/mount.go similarity index 65% rename from vendor/github.com/docker/docker/api/types/plugin_mount.go rename to vendor/github.com/moby/moby/api/types/plugin/mount.go index 5c031cf8b..7970306cc 100644 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ b/vendor/github.com/moby/moby/api/types/plugin/mount.go @@ -1,25 +1,32 @@ -package types +// Code generated by go-swagger; DO NOT EDIT. + +package plugin // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { +// Mount mount +// +// swagger:model Mount +type Mount struct { // description + // Example: This is a mount that's used by the plugin. 
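A rough round-trip sketch for the CapabilityID type added in capability.go above, using the "docker.volumedriver/1.0" form shown in the Interface example; illustrative only:

package main

import (
	"fmt"

	"github.com/moby/moby/api/types/plugin"
)

func main() {
	var c plugin.CapabilityID
	// "docker.volumedriver/1.0" splits into prefix "docker",
	// capability "volumedriver", and version "1.0".
	if err := c.UnmarshalText([]byte("docker.volumedriver/1.0")); err != nil {
		panic(err)
	}
	fmt.Println(c.Prefix, c.Capability, c.Version) // docker volumedriver 1.0
	fmt.Println(c.String())                        // docker.volumedriver/1.0
}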
// Required: true Description string `json:"Description"` // destination + // Example: /mnt/state // Required: true Destination string `json:"Destination"` // name + // Example: some-mount // Required: true Name string `json:"Name"` // options + // Example: ["rbind","rw"] // Required: true Options []string `json:"Options"` @@ -28,10 +35,12 @@ type PluginMount struct { Settable []string `json:"Settable"` // source + // Example: /var/lib/docker/plugins/ // Required: true Source *string `json:"Source"` // type + // Example: bind // Required: true Type string `json:"Type"` } diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/moby/moby/api/types/plugin/plugin.go similarity index 52% rename from vendor/github.com/docker/docker/api/types/plugin.go rename to vendor/github.com/moby/moby/api/types/plugin/plugin.go index abae48b9a..3305170d5 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/moby/moby/api/types/plugin/plugin.go @@ -1,110 +1,126 @@ -package types +// Code generated by go-swagger; DO NOT EDIT. + +package plugin // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // Plugin A plugin for the Engine API +// // swagger:model Plugin type Plugin struct { // config // Required: true - Config PluginConfig `json:"Config"` + Config Config `json:"Config"` // True if the plugin is running. False if the plugin is not running, only installed. + // Example: true // Required: true Enabled bool `json:"Enabled"` // Id + // Example: 5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078 ID string `json:"Id,omitempty"` // name + // Example: tiborvass/sample-volume-plugin // Required: true Name string `json:"Name"` // plugin remote reference used to push/pull the plugin + // Example: localhost:5000/tiborvass/sample-volume-plugin:latest PluginReference string `json:"PluginReference,omitempty"` // settings // Required: true - Settings PluginSettings `json:"Settings"` + Settings Settings `json:"Settings"` } -// PluginConfig The config of a plugin. -// swagger:model PluginConfig -type PluginConfig struct { +// Config The config of a plugin. 
+// +// swagger:model Config +type Config struct { // args // Required: true - Args PluginConfigArgs `json:"Args"` + Args Args `json:"Args"` // description + // Example: A sample volume plugin for Docker // Required: true Description string `json:"Description"` - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - // documentation + // Example: https://docs.docker.com/engine/extend/plugins/ // Required: true Documentation string `json:"Documentation"` // entrypoint + // Example: ["/usr/bin/sample-volume-plugin","/data"] // Required: true Entrypoint []string `json:"Entrypoint"` // env + // Example: [{"Description":"If set, prints debug messages","Name":"DEBUG","Settable":null,"Value":"0"}] // Required: true - Env []PluginEnv `json:"Env"` + Env []Env `json:"Env"` // interface // Required: true - Interface PluginConfigInterface `json:"Interface"` + Interface Interface `json:"Interface"` // ipc host + // Example: false // Required: true IpcHost bool `json:"IpcHost"` // linux // Required: true - Linux PluginConfigLinux `json:"Linux"` + Linux LinuxConfig `json:"Linux"` // mounts // Required: true - Mounts []PluginMount `json:"Mounts"` + Mounts []Mount `json:"Mounts"` // network // Required: true - Network PluginConfigNetwork `json:"Network"` + Network NetworkConfig `json:"Network"` // pid host + // Example: false // Required: true PidHost bool `json:"PidHost"` // propagated mount + // Example: /mnt/volumes // Required: true PropagatedMount string `json:"PropagatedMount"` // user - User PluginConfigUser `json:"User,omitempty"` + User User `json:"User,omitempty"` // work dir + // Example: /bin/ // Required: true WorkDir string `json:"WorkDir"` // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` + Rootfs *RootFS `json:"rootfs,omitempty"` } -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { +// Args args +// +// swagger:model Args +type Args struct { // description + // Example: command line arguments // Required: true Description string `json:"Description"` // name + // Example: args // Required: true Name string `json:"Name"` @@ -117,73 +133,90 @@ type PluginConfigArgs struct { Value []string `json:"Value"` } -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { +// Interface The interface between Docker and the plugin +// +// swagger:model Interface +type Interface struct { // Protocol to use for clients connecting to the plugin. 
+ // Example: some.protocol/v1.0 + // Enum: ["","moby.plugins.http/v1"] ProtocolScheme string `json:"ProtocolScheme,omitempty"` // socket + // Example: plugins.sock // Required: true Socket string `json:"Socket"` // types + // Example: ["docker.volumedriver/1.0"] // Required: true - Types []PluginInterfaceType `json:"Types"` + Types []CapabilityID `json:"Types"` } -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { +// LinuxConfig linux config +// +// swagger:model LinuxConfig +type LinuxConfig struct { // allow all devices + // Example: false // Required: true AllowAllDevices bool `json:"AllowAllDevices"` // capabilities + // Example: ["CAP_SYS_ADMIN","CAP_SYSLOG"] // Required: true Capabilities []string `json:"Capabilities"` // devices // Required: true - Devices []PluginDevice `json:"Devices"` + Devices []Device `json:"Devices"` } -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { +// NetworkConfig network config +// +// swagger:model NetworkConfig +type NetworkConfig struct { // type + // Example: host // Required: true Type string `json:"Type"` } -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { +// RootFS root f s +// +// swagger:model RootFS +type RootFS struct { // diff ids + // Example: ["sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887","sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"] DiffIds []string `json:"diff_ids"` // type + // Example: layers Type string `json:"type,omitempty"` } -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { +// User user +// +// swagger:model User +type User struct { // g ID + // Example: 1000 GID uint32 `json:"GID,omitempty"` // UID + // Example: 1000 UID uint32 `json:"UID,omitempty"` } -// PluginSettings Settings that can be modified by users. -// swagger:model PluginSettings -type PluginSettings struct { +// Settings user-configurable settings for the plugin. +// +// swagger:model Settings +type Settings struct { // args // Required: true @@ -191,13 +224,14 @@ type PluginSettings struct { // devices // Required: true - Devices []PluginDevice `json:"Devices"` + Devices []Device `json:"Devices"` // env + // Example: ["DEBUG=0"] // Required: true Env []string `json:"Env"` // mounts // Required: true - Mounts []PluginMount `json:"Mounts"` + Mounts []Mount `json:"Mounts"` } diff --git a/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go b/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go new file mode 100644 index 000000000..91b327eb4 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "sort" +) + +// ListResponse contains the response for the Engine API +type ListResponse []Plugin + +// Privilege describes a permission the user has to accept +// upon installing a plugin. 
+type Privilege struct { + Name string + Description string + Value []string +} + +// Privileges is a list of Privilege +type Privileges []Privilege + +func (s Privileges) Len() int { + return len(s) +} + +func (s Privileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s Privileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/moby/moby/api/types/registry/auth_response.go b/vendor/github.com/moby/moby/api/types/registry/auth_response.go new file mode 100644 index 000000000..94c2e1bb3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/auth_response.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package registry + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// AuthResponse An identity token was generated successfully. +// +// swagger:model AuthResponse +type AuthResponse struct { + + // An opaque token used to authenticate a user after a successful login + // Example: 9cbaf023786cd7... + IdentityToken string `json:"IdentityToken,omitempty"` + + // The status of the authentication + // Example: Login Succeeded + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/authconfig.go b/vendor/github.com/moby/moby/api/types/registry/authconfig.go new file mode 100644 index 000000000..b612feeba --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/authconfig.go @@ -0,0 +1,35 @@ +package registry + +import "context" + +// AuthHeader is the name of the header used to send encoded registry +// authorization credentials for registry operations (push/pull). +const AuthHeader = "X-Registry-Auth" + +// RequestAuthConfig is a function interface that clients can supply +// to retry operations after getting an authorization error. +// +// The function must return the [AuthHeader] value ([AuthConfig]), encoded +// in base64url format ([RFC4648, section 5]), which can be decoded by +// [DecodeAuthConfig]. +// +// It must return an error if the privilege request fails. +// +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 +type RequestAuthConfig func(context.Context) (string, error) + +// AuthConfig contains authorization information for connecting to a Registry. +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/registry.go b/vendor/github.com/moby/moby/api/types/registry/registry.go new file mode 100644 index 000000000..7361228d6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/registry.go @@ -0,0 +1,67 @@ +package registry + +import ( + "net/netip" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
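A minimal sketch of producing the AuthHeader value described in authconfig.go above, following the JSON-then-base64url encoding noted on RequestAuthConfig; the encodeAuthConfig helper and the credentials are illustrative, not part of the vendored package:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/moby/moby/api/types/registry"
)

// encodeAuthConfig marshals an AuthConfig and encodes it in base64url
// (RFC 4648, section 5) for use as the X-Registry-Auth header value.
func encodeAuthConfig(ac registry.AuthConfig) (string, error) {
	buf, err := json.Marshal(ac)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

func main() {
	header, err := encodeAuthConfig(registry.AuthConfig{
		Username:      "user",
		Password:      "secret",
		ServerAddress: "registry.example.com", // placeholder registry address
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(registry.AuthHeader, header)
}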
+type ServiceConfig struct { + InsecureRegistryCIDRs []netip.Prefix `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor ocispec.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/moby/moby/api/types/registry/search.go similarity index 63% rename from vendor/github.com/docker/docker/api/types/registry/search.go rename to vendor/github.com/moby/moby/api/types/registry/search.go index 994ca4c6f..bd79462f6 100644 --- a/vendor/github.com/docker/docker/api/types/registry/search.go +++ b/vendor/github.com/moby/moby/api/types/registry/search.go @@ -1,26 +1,5 @@ package registry -import ( - "context" - - "github.com/docker/docker/api/types/filters" -) - -// SearchOptions holds parameters to search images with. -type SearchOptions struct { - RegistryAuth string - - // PrivilegeFunc is a function that clients can supply to retry operations - // after getting an authorization error. This function returns the registry - // authentication header value in base64 encoded format, or an error if the - // privilege request fails. - // - // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. 
- PrivilegeFunc func(context.Context) (string, error) - Filters filters.Args - Limit int -} - // SearchResult describes a search result returned from a registry type SearchResult struct { // StarCount indicates the number of stars this repository has diff --git a/vendor/github.com/docker/docker/api/types/storage/driver_data.go b/vendor/github.com/moby/moby/api/types/storage/driver_data.go similarity index 61% rename from vendor/github.com/docker/docker/api/types/storage/driver_data.go rename to vendor/github.com/moby/moby/api/types/storage/driver_data.go index 009e21309..65d5b4c20 100644 --- a/vendor/github.com/docker/docker/api/types/storage/driver_data.go +++ b/vendor/github.com/moby/moby/api/types/storage/driver_data.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package storage // This file was generated by the swagger tool. @@ -14,10 +16,12 @@ type DriverData struct { // This information is driver-specific, and depends on the storage-driver // in use, and should be used for informational purposes only. // + // Example: {"MergedDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged","UpperDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff","WorkDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"} // Required: true Data map[string]string `json:"Data"` // Name of the storage driver. + // Example: overlay2 // Required: true Name string `json:"Name"` } diff --git a/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go new file mode 100644 index 000000000..d82f2b6bc --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// RootFSStorage Information about the storage used for the container's root filesystem. +// +// swagger:model RootFSStorage +type RootFSStorage struct { + + // Information about the snapshot used for the container's root filesystem. + // + Snapshot *RootFSStorageSnapshot `json:"Snapshot,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go new file mode 100644 index 000000000..dd2b82d24 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go @@ -0,0 +1,15 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// RootFSStorageSnapshot Information about a snapshot backend of the container's root filesystem. +// +// swagger:model RootFSStorageSnapshot +type RootFSStorageSnapshot struct { + + // Name of the snapshotter. + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/storage/storage.go b/vendor/github.com/moby/moby/api/types/storage/storage.go new file mode 100644 index 000000000..77843db97 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/storage/storage.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Storage Information about the storage used by the container. +// +// swagger:model Storage +type Storage struct { + + // Information about the storage used for the container's root filesystem. + // + RootFS *RootFSStorage `json:"RootFS,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/moby/moby/api/types/swarm/common.go similarity index 95% rename from vendor/github.com/docker/docker/api/types/swarm/common.go rename to vendor/github.com/moby/moby/api/types/swarm/common.go index 5ded7dba8..b42812e03 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/moby/moby/api/types/swarm/common.go @@ -1,4 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "strconv" diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/moby/moby/api/types/swarm/config.go similarity index 86% rename from vendor/github.com/docker/docker/api/types/swarm/config.go rename to vendor/github.com/moby/moby/api/types/swarm/config.go index bdec82ffb..b029f2af8 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/vendor/github.com/moby/moby/api/types/swarm/config.go @@ -1,9 +1,7 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "os" - - "github.com/docker/docker/api/types/filters" ) // Config represents a config. @@ -55,8 +53,3 @@ type ConfigCreateResponse struct { // ID is the id of the created config. ID string } - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/moby/moby/api/types/swarm/container.go similarity index 95% rename from vendor/github.com/docker/docker/api/types/swarm/container.go rename to vendor/github.com/moby/moby/api/types/swarm/container.go index 30e3de70c..268565ec8 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/moby/moby/api/types/swarm/container.go @@ -1,10 +1,11 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( + "net/netip" "time" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/mount" ) // DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) @@ -14,7 +15,7 @@ import ( // TODO: `domain` is not supported yet. 
type DNSConfig struct { // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` + Nameservers []netip.Addr `json:",omitempty"` // Search specifies the search list for host-name lookup Search []string `json:",omitempty"` // Options allows certain internal resolver variables to be modified diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/moby/moby/api/types/swarm/network.go similarity index 79% rename from vendor/github.com/docker/docker/api/types/swarm/network.go rename to vendor/github.com/moby/moby/api/types/swarm/network.go index 98ef3284d..65aabc9d3 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/moby/moby/api/types/swarm/network.go @@ -1,7 +1,9 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( - "github.com/docker/docker/api/types/network" + "net/netip" + + "github.com/moby/moby/api/types/network" ) // Endpoint represents an endpoint. @@ -30,7 +32,7 @@ const ( // PortConfig represents the config of a port. type PortConfig struct { Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` + Protocol network.IPProtocol `json:",omitempty"` // TargetPort is the port inside the container TargetPort uint32 `json:",omitempty"` // PublishedPort is the port on the swarm hosts @@ -52,24 +54,14 @@ const ( PortConfigPublishModeHost PortConfigPublishMode = "host" ) -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - // EndpointVirtualIP represents the virtual ip of a port. type EndpointVirtualIP struct { NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` + + // Addr is the virtual ip address. + // This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards + // compatibility, but only the IP address is used. + Addr netip.Prefix `json:",omitempty"` } // Network represents a network. @@ -103,8 +95,12 @@ type NetworkAttachmentConfig struct { // NetworkAttachment represents a network attachment. type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` + Network Network `json:",omitempty"` + + // Addresses contains the IP addresses associated with the endpoint in the network. + // This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards + // compatibility, but only the IP address is used. + Addresses []netip.Prefix `json:",omitempty"` } // IPAMOptions represents ipam options. @@ -115,7 +111,7 @@ type IPAMOptions struct { // IPAMConfig represents ipam configuration. 
type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` + Subnet netip.Prefix `json:",omitempty"` + Range netip.Prefix `json:",omitempty"` + Gateway netip.Addr `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/moby/moby/api/types/swarm/node.go similarity index 91% rename from vendor/github.com/docker/docker/api/types/swarm/node.go rename to vendor/github.com/moby/moby/api/types/swarm/node.go index f175b1b2c..9523799b6 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/moby/moby/api/types/swarm/node.go @@ -1,5 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" -import "github.com/docker/docker/api/types/filters" +package swarm // Node represents a node. type Node struct { @@ -132,19 +131,9 @@ const ( ) // Topology defines the CSI topology of this node. This type is a duplicate of -// github.com/docker/docker/api/types.Topology. Because the type definition +// [github.com/moby/moby/api/types/volume.Topology]. Because the type definition // is so simple and to avoid complicated structure or circular imports, we just // duplicate it here. See that type for full documentation type Topology struct { Segments map[string]string `json:",omitempty"` } - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/moby/moby/api/types/swarm/runtime.go similarity index 54% rename from vendor/github.com/docker/docker/api/types/swarm/runtime.go rename to vendor/github.com/moby/moby/api/types/swarm/runtime.go index 0c77403cc..23ea712c4 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ b/vendor/github.com/moby/moby/api/types/swarm/runtime.go @@ -1,4 +1,4 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm // RuntimeType is the type of runtime used for the TaskSpec type RuntimeType string @@ -25,3 +25,21 @@ const ( type NetworkAttachmentSpec struct { ContainerID string } + +// RuntimeSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type RuntimeSpec struct { + Name string `json:"name,omitempty"` + Remote string `json:"remote,omitempty"` + Privileges []*RuntimePrivilege `json:"privileges,omitempty"` + Disabled bool `json:"disabled,omitempty"` + Env []string `json:"env,omitempty"` +} + +// RuntimePrivilege describes a permission the user has to accept +// upon installing a plugin. 
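Since the swarm types above now take net/netip values instead of strings, a small construction sketch under that assumption; the addresses are illustrative only:

package main

import (
	"fmt"
	"net/netip"

	"github.com/moby/moby/api/types/swarm"
)

func main() {
	// IPAMConfig now uses netip.Prefix/netip.Addr rather than strings.
	ipam := swarm.IPAMConfig{
		Subnet:  netip.MustParsePrefix("10.0.9.0/24"),
		Range:   netip.MustParsePrefix("10.0.9.0/26"),
		Gateway: netip.MustParseAddr("10.0.9.1"),
	}

	// DNSConfig nameservers are netip.Addr values as well.
	dns := swarm.DNSConfig{
		Nameservers: []netip.Addr{netip.MustParseAddr("10.0.0.53")},
		Search:      []string{"example.internal"},
	}

	fmt.Println(ipam.Subnet, ipam.Gateway, dns.Nameservers)
}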
+type RuntimePrivilege struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Value []string `json:"value,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/moby/moby/api/types/swarm/secret.go similarity index 84% rename from vendor/github.com/docker/docker/api/types/swarm/secret.go rename to vendor/github.com/moby/moby/api/types/swarm/secret.go index 248cffab8..0e27ed9b0 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/moby/moby/api/types/swarm/secret.go @@ -1,9 +1,7 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "os" - - "github.com/docker/docker/api/types/filters" ) // Secret represents a secret. @@ -25,7 +23,7 @@ type SecretSpec struct { // This field is only used to create the secret, and is not returned // by other endpoints. // - // [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize + // [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize Data []byte `json:",omitempty"` // Driver is the name of the secrets driver used to fetch the secret's @@ -59,8 +57,3 @@ type SecretCreateResponse struct { // ID is the id of the created secret. ID string } - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/moby/moby/api/types/swarm/service.go similarity index 71% rename from vendor/github.com/docker/docker/api/types/swarm/service.go rename to vendor/github.com/moby/moby/api/types/swarm/service.go index 1d0a9a47b..0b678dea3 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/moby/moby/api/types/swarm/service.go @@ -1,9 +1,7 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "time" - - "github.com/docker/docker/api/types/filters" ) // Service represents a service. @@ -37,12 +35,7 @@ type ServiceSpec struct { Mode ServiceMode `json:",omitempty"` UpdateConfig *UpdateConfig `json:",omitempty"` RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks specifies which networks the service should attach to. - // - // Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` } // ServiceMode represents the mode of a service. @@ -113,18 +106,27 @@ type ReplicatedJob struct { // This type is deliberately empty. type GlobalJob struct{} +// FailureAction is the action to perform when updating a service fails. +type FailureAction string + const ( // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" + UpdateFailureActionPause FailureAction = "pause" // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" + UpdateFailureActionContinue FailureAction = "continue" // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" + UpdateFailureActionRollback FailureAction = "rollback" +) +// UpdateOrder is the order of operations when rolling out or rolling back +// an updated tasks for a service. 
+type UpdateOrder string + +const ( // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" + UpdateOrderStopFirst UpdateOrder = "stop-first" // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" + UpdateOrderStartFirst UpdateOrder = "start-first" ) // UpdateConfig represents the update configuration. @@ -137,7 +139,7 @@ type UpdateConfig struct { Delay time.Duration `json:",omitempty"` // FailureAction is the action to take when an update failures. - FailureAction string `json:",omitempty"` + FailureAction FailureAction `json:",omitempty"` // Monitor indicates how long to monitor a task for failure after it is // created. If the task fails by ending up in one of the states @@ -163,7 +165,7 @@ type UpdateConfig struct { // Order indicates the order of operations when rolling out an updated // task. Either the old task is shut down before the new task is // started, or the new task is started before the old task is shut down. - Order string + Order UpdateOrder } // ServiceStatus represents the number of running tasks in a service and the @@ -205,68 +207,12 @@ type JobStatus struct { LastExecution time.Time `json:",omitempty"` } -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} +// RegistryAuthSource defines options for the "registryAuthFrom" query parameter +// on service update. +type RegistryAuthSource string // Values for RegistryAuthFrom in ServiceUpdateOptions const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" + RegistryAuthFromSpec RegistryAuthSource = "spec" + RegistryAuthFromPreviousSpec RegistryAuthSource = "previous-spec" ) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. 
- QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go b/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go similarity index 73% rename from vendor/github.com/docker/docker/api/types/swarm/service_create_response.go rename to vendor/github.com/moby/moby/api/types/swarm/service_create_response.go index 9a268ff1b..ebbc097d9 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go +++ b/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package swarm // This file was generated by the swagger tool. @@ -10,11 +12,13 @@ package swarm type ServiceCreateResponse struct { // The ID of the created service. + // Example: ak7w3gjqoa3kuz8xcpnyy0pvl ID string `json:"ID,omitempty"` // Optional warning message. // // FIXME(thaJeztah): this should have "omitempty" in the generated type. // + // Example: ["unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"] Warnings []string `json:"Warnings"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go b/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go similarity index 66% rename from vendor/github.com/docker/docker/api/types/swarm/service_update_response.go rename to vendor/github.com/moby/moby/api/types/swarm/service_update_response.go index 0417467da..b7649096a 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go +++ b/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go @@ -1,9 +1,13 @@ +// Code generated by go-swagger; DO NOT EDIT. + package swarm // This file was generated by the swagger tool. 
// Editing this file might prove futile when you re-run the swagger generate command // ServiceUpdateResponse service update response +// Example: {"Warnings":["unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"]} +// // swagger:model ServiceUpdateResponse type ServiceUpdateResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/moby/moby/api/types/swarm/swarm.go similarity index 92% rename from vendor/github.com/docker/docker/api/types/swarm/swarm.go rename to vendor/github.com/moby/moby/api/types/swarm/swarm.go index 2711c9eac..842185031 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/moby/moby/api/types/swarm/swarm.go @@ -1,6 +1,7 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( + "net/netip" "time" ) @@ -12,7 +13,7 @@ type ClusterInfo struct { Spec Spec TLSInfo TLSInfo RootRotationInProgress bool - DefaultAddrPool []string + DefaultAddrPool []netip.Prefix SubnetSize uint32 DataPathPort uint32 } @@ -159,7 +160,7 @@ type InitRequest struct { Spec Spec AutoLockManagers bool Availability NodeAvailability - DefaultAddrPool []string + DefaultAddrPool []netip.Prefix SubnetSize uint32 } @@ -213,29 +214,12 @@ type Info struct { Warnings []string `json:",omitempty"` } -// Status provides information about the current swarm status and role, -// obtained from the "Swarm" header in the API response. -type Status struct { - // NodeState represents the state of the node. - NodeState LocalNodeState - - // ControlAvailable indicates if the node is a swarm manager. - ControlAvailable bool -} - // Peer represents a peer. type Peer struct { NodeID string Addr string } -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} - // UnlockKeyResponse contains the response for Engine API: // GET /swarm/unlockkey type UnlockKeyResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/moby/moby/api/types/swarm/task.go similarity index 93% rename from vendor/github.com/docker/docker/api/types/swarm/task.go rename to vendor/github.com/moby/moby/api/types/swarm/task.go index 722d1ceb6..f61190683 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/moby/moby/api/types/swarm/task.go @@ -1,10 +1,7 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" +package swarm import ( "time" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm/runtime" ) // TaskState represents the state of a task. @@ -77,7 +74,7 @@ type TaskSpec struct { // NetworkAttachmentSpec is used if the `Runtime` field is set to // `attachment`. 
ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` + PluginSpec *RuntimeSpec `json:",omitempty"` NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` Resources *ResourceRequirements `json:",omitempty"` @@ -141,6 +138,17 @@ type DiscreteGenericResource struct { type ResourceRequirements struct { Limits *Limit `json:",omitempty"` Reservations *Resources `json:",omitempty"` + + // Amount of swap in bytes - can only be used together with a memory limit + // -1 means unlimited + // a null pointer keeps the default behaviour of granting twice the memory + // amount in swap + SwapBytes *int64 `json:"SwapBytes,omitzero"` + + // Tune container memory swappiness (0 to 100) - if not specified, defaults + // to the container OS's default - generally 60, or the value predefined in + // the image; set to -1 to unset a previously set value + MemorySwappiness *int64 `json:"MemorySwappiness,omitzero"` } // Placement represents orchestration parameters. @@ -224,8 +232,3 @@ type VolumeAttachment struct { // in the ContainerSpec, that this volume fulfills. Target string `json:",omitempty"` } - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} diff --git a/vendor/github.com/moby/moby/api/types/system/disk_usage.go b/vendor/github.com/moby/moby/api/types/system/disk_usage.go new file mode 100644 index 000000000..33230aed2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/disk_usage.go @@ -0,0 +1,31 @@ +package system + +import ( + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/api/types/volume" +) + +// DiskUsageObject represents an object type used for disk usage query filtering. +type DiskUsageObject string + +const ( + // ContainerObject represents a container DiskUsageObject. + ContainerObject DiskUsageObject = "container" + // ImageObject represents an image DiskUsageObject. + ImageObject DiskUsageObject = "image" + // VolumeObject represents a volume DiskUsageObject. + VolumeObject DiskUsageObject = "volume" + // BuildCacheObject represents a build-cache DiskUsageObject.
+ BuildCacheObject DiskUsageObject = "build-cache" +) + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + ImageUsage *image.DiskUsage `json:"ImageUsage,omitempty"` + ContainerUsage *container.DiskUsage `json:"ContainerUsage,omitempty"` + VolumeUsage *volume.DiskUsage `json:"VolumeUsage,omitempty"` + BuildCacheUsage *build.DiskUsage `json:"BuildCacheUsage,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/moby/moby/api/types/system/info.go similarity index 89% rename from vendor/github.com/docker/docker/api/types/system/info.go rename to vendor/github.com/moby/moby/api/types/system/info.go index 047639ed9..bca0459df 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/moby/moby/api/types/system/info.go @@ -1,9 +1,11 @@ package system import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" + "net/netip" + + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/api/types/swarm" ) // Info contains response of Engine API: @@ -21,8 +23,6 @@ type Info struct { Plugins PluginsInfo MemoryLimit bool SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool @@ -139,16 +139,11 @@ type PluginsInfo struct { type Commit struct { // ID is the actual commit ID or version of external tool. ID string - - // Expected is the commit ID of external tool expected by dockerd as set at build time. - // - // Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions. - Expected string `json:",omitempty"` } // NetworkAddressPool is a temp struct used by [Info] struct. type NetworkAddressPool struct { - Base string + Base netip.Prefix Size int } diff --git a/vendor/github.com/docker/docker/api/types/system/runtime.go b/vendor/github.com/moby/moby/api/types/system/runtime.go similarity index 79% rename from vendor/github.com/docker/docker/api/types/system/runtime.go rename to vendor/github.com/moby/moby/api/types/system/runtime.go index d077295a0..33cad3674 100644 --- a/vendor/github.com/docker/docker/api/types/system/runtime.go +++ b/vendor/github.com/moby/moby/api/types/system/runtime.go @@ -9,8 +9,8 @@ type Runtime struct { // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. - Type string `json:"runtimeType,omitempty"` - Options map[string]interface{} `json:"options,omitempty"` + Type string `json:"runtimeType,omitempty"` + Options map[string]any `json:"options,omitempty"` } // RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus]. diff --git a/vendor/github.com/moby/moby/api/types/system/version_response.go b/vendor/github.com/moby/moby/api/types/system/version_response.go new file mode 100644 index 000000000..61cd1b6e2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/version_response.go @@ -0,0 +1,58 @@ +package system + +// VersionResponse contains information about the Docker server host. +// GET "/version" +type VersionResponse struct { + // Platform is the platform (product name) the server is running on. 
+ Platform PlatformInfo `json:",omitempty"` + + // Version is the version of the daemon. + Version string + + // APIVersion is the highest API version supported by the server. + APIVersion string `json:"ApiVersion"` + + // MinAPIVersion is the minimum API version the server supports. + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + + // Os is the operating system the server runs on. + Os string + + // Arch is the hardware architecture the server runs on. + Arch string + + // Components contains version information for the components making + // up the server. Information in this field is for informational + // purposes, and not part of the API contract. + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + GitCommit string `json:",omitempty"` + GoVersion string `json:",omitempty"` + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// PlatformInfo holds information about the platform (product name) the +// server is running on. +type PlatformInfo struct { + // Name is the name of the platform (for example, "Docker Engine - Community", + // or "Docker Desktop 4.49.0 (208003)") + Name string +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + + // Details contains Key/value pairs of strings with additional information + // about the component. These values are intended for informational purposes + // only, and their content is not defined, and not part of the API + // specification. + // + // These messages can be printed by the client as information to the user. + Details map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/types.go b/vendor/github.com/moby/moby/api/types/types.go new file mode 100644 index 000000000..a89befdda --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/types.go @@ -0,0 +1,18 @@ +package types + +const ( + // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams. + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams. + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" + + // MediaTypeJSON is the MIME-Type for JSON objects. + MediaTypeJSON = "application/json" + + // MediaTypeNDJSON is the MIME-Type for Newline Delimited JSON objects streams. + MediaTypeNDJSON = "application/x-ndjson" + + // MediaTypeJSONSequence is the MIME-Type for JSON Text Sequences (RFC7464). 
+ MediaTypeJSONSequence = "application/json-seq" +) diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go similarity index 99% rename from vendor/github.com/docker/docker/api/types/volume/cluster_volume.go rename to vendor/github.com/moby/moby/api/types/volume/cluster_volume.go index 618a48162..07b75d12a 100644 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ b/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go @@ -1,7 +1,7 @@ package volume import ( - "github.com/docker/docker/api/types/swarm" + "github.com/moby/moby/api/types/swarm" ) // ClusterVolume contains options and information specific to, and only present diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/moby/moby/api/types/volume/create_request.go similarity index 65% rename from vendor/github.com/docker/docker/api/types/volume/create_options.go rename to vendor/github.com/moby/moby/api/types/volume/create_request.go index 37c41a609..3217df827 100644 --- a/vendor/github.com/docker/docker/api/types/volume/create_options.go +++ b/vendor/github.com/moby/moby/api/types/volume/create_request.go @@ -1,29 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + package volume // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// CreateOptions VolumeConfig +// CreateRequest VolumeConfig +// +// # Volume configuration // -// Volume configuration -// swagger:model CreateOptions -type CreateOptions struct { +// swagger:model CreateRequest +type CreateRequest struct { // cluster volume spec ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` // Name of the volume driver to use. + // Example: custom Driver string `json:"Driver,omitempty"` // A mapping of driver options and values. These options are // passed directly to the driver and are driver specific. // + // Example: {"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"} DriverOpts map[string]string `json:"DriverOpts,omitempty"` // User-defined key/value metadata. + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} Labels map[string]string `json:"Labels,omitempty"` // The new volume's name. If not specified, Docker generates a name. // + // Example: tardis Name string `json:"Name,omitempty"` } diff --git a/vendor/github.com/moby/moby/api/types/volume/disk_usage.go b/vendor/github.com/moby/moby/api/types/volume/disk_usage.go new file mode 100644 index 000000000..e2afbac65 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for volume resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active volumes. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of volumes. + // + Items []Volume `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive volumes. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all volumes. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by volumes. 
+ // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/moby/moby/api/types/volume/list_response.go similarity index 74% rename from vendor/github.com/docker/docker/api/types/volume/list_response.go rename to vendor/github.com/moby/moby/api/types/volume/list_response.go index ca5192a2a..f257762f0 100644 --- a/vendor/github.com/docker/docker/api/types/volume/list_response.go +++ b/vendor/github.com/moby/moby/api/types/volume/list_response.go @@ -1,3 +1,5 @@ +// Code generated by go-swagger; DO NOT EDIT. + package volume // This file was generated by the swagger tool. @@ -5,14 +7,16 @@ package volume // ListResponse VolumeListResponse // -// Volume list response +// # Volume list response +// // swagger:model ListResponse type ListResponse struct { // List of volumes - Volumes []*Volume `json:"Volumes"` + Volumes []Volume `json:"Volumes"` // Warnings that occurred when fetching the list of volumes. // + // Example: [] Warnings []string `json:"Warnings"` } diff --git a/vendor/github.com/moby/moby/api/types/volume/prune_report.go b/vendor/github.com/moby/moby/api/types/volume/prune_report.go new file mode 100644 index 000000000..7f501d01a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/prune_report.go @@ -0,0 +1,8 @@ +package volume + +// PruneReport contains the response for Engine API: +// POST "/volumes/prune" +type PruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/moby/moby/api/types/volume/volume.go similarity index 81% rename from vendor/github.com/docker/docker/api/types/volume/volume.go rename to vendor/github.com/moby/moby/api/types/volume/volume.go index ea7d555e5..524ebfb8a 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume.go +++ b/vendor/github.com/moby/moby/api/types/volume/volume.go @@ -1,9 +1,12 @@ +// Code generated by go-swagger; DO NOT EDIT. + package volume // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command // Volume volume +// // swagger:model Volume type Volume struct { @@ -11,33 +14,41 @@ type Volume struct { ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` // Date/Time the volume was created. + // Example: 2016-06-07T20:31:11.853781916Z CreatedAt string `json:"CreatedAt,omitempty"` // Name of the volume driver used by the volume. + // Example: custom // Required: true Driver string `json:"Driver"` // User-defined key/value metadata. + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} // Required: true Labels map[string]string `json:"Labels"` // Mount path of the volume on the host. + // Example: /var/lib/docker/volumes/tardis // Required: true Mountpoint string `json:"Mountpoint"` // Name of the volume. + // Example: tardis // Required: true Name string `json:"Name"` // The driver specific options used when creating the volume. // + // Example: {"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"} // Required: true Options map[string]string `json:"Options"` // The level at which the volume exists. Either `global` for cluster-wide, // or `local` for machine level. // + // Example: local // Required: true + // Enum: ["local","global"] Scope string `json:"Scope"` // Low-level details about the volume, provided by the volume driver. 
@@ -47,7 +58,8 @@ type Volume struct { // The `Status` field is optional, and is omitted if the volume driver // does not support this feature. // - Status map[string]interface{} `json:"Status,omitempty"` + // Example: {"hello":"world"} + Status map[string]any `json:"Status,omitempty"` // usage data UsageData *UsageData `json:"UsageData,omitempty"` diff --git a/vendor/github.com/moby/moby/client/LICENSE b/vendor/github.com/moby/moby/client/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/moby/client/README.md b/vendor/github.com/moby/moby/client/README.md new file mode 100644 index 000000000..c8c5b96f9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/README.md @@ -0,0 +1,43 @@ +# Go client for the Docker Engine API + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/moby/moby/client)](https://pkg.go.dev/github.com/moby/moby/client) +![GitHub License](https://img.shields.io/github/license/moby/moby) +[![Go Report Card](https://goreportcard.com/badge/github.com/moby/moby/client)](https://goreportcard.com/report/github.com/moby/moby/client) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/moby/moby/badge)](https://scorecard.dev/viewer/?uri=github.com/moby/moby) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10989/badge)](https://www.bestpractices.dev/projects/10989) + +The `docker` command uses this package to communicate with the daemon. It can +also be used by your own Go applications to do anything the command-line +interface does: running containers, pulling or pushing images, etc. + +For example, to list all containers (the equivalent of `docker ps --all`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/moby/moby/client" +) + +func main() { + apiClient, err := client.New(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + panic(err) + } + defer apiClient.Close() + + containers, err := apiClient.ContainerList(context.Background(), client.ContainerListOptions{All: true}) + if err != nil { + panic(err) + } + + for _, ctr := range containers.Items { + fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status) + } +} +``` + +[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/moby/moby/client) diff --git a/vendor/github.com/moby/moby/client/auth.go b/vendor/github.com/moby/moby/client/auth.go new file mode 100644 index 000000000..8baf39d2c --- /dev/null +++ b/vendor/github.com/moby/moby/client/auth.go @@ -0,0 +1,14 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/registry" +) + +// staticAuth creates a privilegeFn from the given registryAuth. +func staticAuth(registryAuth string) registry.RequestAuthConfig { + return func(ctx context.Context) (string, error) { + return registryAuth, nil + } +} diff --git a/vendor/github.com/moby/moby/client/build_cancel.go b/vendor/github.com/moby/moby/client/build_cancel.go new file mode 100644 index 000000000..f6cfc6bc9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/build_cancel.go @@ -0,0 +1,21 @@ +package client + +import ( + "context" + "net/url" +) + +type BuildCancelOptions struct{} + +type BuildCancelResult struct{} + +// BuildCancel requests the daemon to cancel the ongoing build request +// with the given id.
+func (cli *Client) BuildCancel(ctx context.Context, id string, _ BuildCancelOptions) (BuildCancelResult, error) { + query := url.Values{} + query.Set("id", id) + + resp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + defer ensureReaderClosed(resp) + return BuildCancelResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/build_prune.go b/vendor/github.com/moby/moby/client/build_prune.go new file mode 100644 index 000000000..a22e9685e --- /dev/null +++ b/vendor/github.com/moby/moby/client/build_prune.go @@ -0,0 +1,67 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "strconv" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/client/pkg/versions" +) + +// BuildCachePruneOptions hold parameters to prune the build cache. +type BuildCachePruneOptions struct { + All bool + ReservedSpace int64 + MaxUsedSpace int64 + MinFreeSpace int64 + Filters Filters +} + +// BuildCachePruneResult holds the result from the BuildCachePrune method. +type BuildCachePruneResult struct { + Report build.CachePruneReport +} + +// BuildCachePrune requests the daemon to delete unused cache data. +func (cli *Client) BuildCachePrune(ctx context.Context, opts BuildCachePruneOptions) (BuildCachePruneResult, error) { + var out BuildCachePruneResult + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + + if opts.ReservedSpace != 0 { + // Prior to API v1.48, 'keep-storage' was used to set the reserved space for the build cache. + // TODO(austinvazquez): remove once API v1.47 is no longer supported. See https://github.com/moby/moby/issues/50902 + if versions.LessThanOrEqualTo(cli.version, "1.47") { + query.Set("keep-storage", strconv.Itoa(int(opts.ReservedSpace))) + } else { + query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace))) + } + } + if opts.MaxUsedSpace != 0 { + query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace))) + } + if opts.MinFreeSpace != 0 { + query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace))) + } + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return BuildCachePruneResult{}, err + } + + report := build.CachePruneReport{} + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return BuildCachePruneResult{}, fmt.Errorf("error retrieving disk usage: %w", err) + } + + out.Report = report + return out, nil +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_create.go b/vendor/github.com/moby/moby/client/checkpoint_create.go new file mode 100644 index 000000000..b3ba5459d --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_create.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/checkpoint" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container. +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointCreateResult holds the result from [client.CheckpointCreate]. +type CheckpointCreateResult struct { + // Add future fields here +} + +// CheckpointCreate creates a checkpoint from the given container. 
+func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options CheckpointCreateOptions) (CheckpointCreateResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CheckpointCreateResult{}, err + } + requestBody := checkpoint.CreateRequest{ + CheckpointID: options.CheckpointID, + CheckpointDir: options.CheckpointDir, + Exit: options.Exit, + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, requestBody, nil) + defer ensureReaderClosed(resp) + return CheckpointCreateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_list.go b/vendor/github.com/moby/moby/client/checkpoint_list.go new file mode 100644 index 000000000..5815f836a --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/checkpoint" +) + +// CheckpointListOptions holds parameters to list checkpoints for a container. +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointListResult holds the result from the CheckpointList method. +type CheckpointListResult struct { + Items []checkpoint.Summary +} + +// CheckpointList returns the checkpoints of the given container in the docker host. +func (cli *Client) CheckpointList(ctx context.Context, container string, options CheckpointListOptions) (CheckpointListResult, error) { + var out CheckpointListResult + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return out, err + } + + err = json.NewDecoder(resp.Body).Decode(&out.Items) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_remove.go b/vendor/github.com/moby/moby/client/checkpoint_remove.go new file mode 100644 index 000000000..8042c5088 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_remove.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "net/url" +) + +// CheckpointRemoveOptions holds parameters to delete a checkpoint from a container. +type CheckpointRemoveOptions struct { + CheckpointID string + CheckpointDir string +} + +// CheckpointRemoveResult represents the result of [Client.CheckpointRemove]. +type CheckpointRemoveResult struct { + // No fields currently; placeholder for future use. +} + +// CheckpointRemove deletes the checkpoint with the given name from the given container. 
+func (cli *Client) CheckpointRemove(ctx context.Context, containerID string, options CheckpointRemoveOptions) (CheckpointRemoveResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CheckpointRemoveResult{}, err + } + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + defer ensureReaderClosed(resp) + return CheckpointRemoveResult{}, err +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/moby/moby/client/client.go similarity index 61% rename from vendor/github.com/docker/docker/client/client.go rename to vendor/github.com/moby/moby/client/client.go index cd47f05eb..3d1186144 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/moby/moby/client/client.go @@ -6,10 +6,12 @@ https://docs.docker.com/reference/api/engine/ # Usage -You use the library by constructing a client object using [NewClientWithOpts] +You use the library by constructing a client object using [New] and calling methods on it. The client can be configured from environment -variables by passing the [FromEnv] option, or configured manually by passing any -of the other available [Opts]. +variables by passing the [FromEnv] option, and the [WithAPIVersionNegotiation] +option to allow downgrading the API version used when connecting with an older +daemon version. Other options can be configured manually by passing any of +the available [Opt] options. For example, to list running containers (the equivalent of "docker ps"): @@ -18,32 +20,43 @@ For example, to list running containers (the equivalent of "docker ps"): import ( "context" "fmt" + "log" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/client" + "github.com/moby/moby/client" ) func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) + // Create a new client that handles common environment variables + // for configuration (DOCKER_HOST, DOCKER_API_VERSION), and does + // API-version negotiation to allow downgrading the API version + // when connecting with an older daemon version. + apiClient, err := client.New(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { - panic(err) + log.Fatal(err) } - containers, err := cli.ContainerList(context.Background(), container.ListOptions{}) + // List all containers (both stopped and running). + result, err := apiClient.ContainerList(context.Background(), client.ContainerListOptions{ + All: true, + }) if err != nil { - panic(err) + log.Fatal(err) } - for _, ctr := range containers { - fmt.Printf("%s %s\n", ctr.ID, ctr.Image) + // Print each container's ID, status and the image it was created from.
+ fmt.Printf("%s %-22s %s\n", "ID", "STATUS", "IMAGE") + for _, ctr := range result.Items { + fmt.Printf("%s %-22s %s\n", ctr.ID, ctr.Status, ctr.Image) } } */ -package client // import "github.com/docker/docker/client" +package client import ( "context" "crypto/tls" + "errors" + "fmt" "net" "net/http" "net/url" @@ -53,11 +66,9 @@ import ( "sync/atomic" "time" - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" + cerrdefs "github.com/containerd/errdefs" "github.com/docker/go-connections/sockets" - "github.com/pkg/errors" + "github.com/moby/moby/client/pkg/versions" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) @@ -91,12 +102,19 @@ import ( // [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 const DummyHost = "api.moby.localhost" -// fallbackAPIVersion is the version to fallback to if API-version negotiation -// fails. This version is the highest version of the API before API-version -// negotiation was introduced. If negotiation fails (or no API version was -// included in the API response), we assume the API server uses the most -// recent version before negotiation was introduced. -const fallbackAPIVersion = "1.24" +// MaxAPIVersion is the highest REST API version supported by the client. +// If API-version negotiation is enabled (see [WithAPIVersionNegotiation], +// [Client.NegotiateAPIVersion]), the client may downgrade its API version. +// Similarly, the [WithVersion] and [WithVersionFromEnv] allow overriding +// the version. +// +// This version may be lower than the version of the api library module used. +const MaxAPIVersion = "1.52" + +// fallbackAPIVersion is the version to fall back to if API-version negotiation +// fails. API versions below this version are not supported by the client, +// and not considered when negotiating. +const fallbackAPIVersion = "1.44" // Ensure that Client always implements APIClient. var _ APIClient = &Client{} @@ -104,35 +122,7 @@ var _ APIClient = &Client{} // Client is the API client that performs all operations // against a docker server. type Client struct { - // scheme sets the scheme for the client - scheme string - // host holds the server address to connect to - host string - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // client used to send and receive http requests. - client *http.Client - // version of the server to talk to. - version string - // userAgent is the User-Agent header to use for HTTP requests. It takes - // precedence over User-Agent headers set in customHTTPHeaders, and other - // header variables. When set to an empty string, the User-Agent header - // is removed, and no header is sent. - userAgent *string - // custom HTTP headers configured by users. - customHTTPHeaders map[string]string - // manualOverride is set to true when the version was set by users. - manualOverride bool - - // negotiateVersion indicates if the client should automatically negotiate - // the API version to use when making requests. API version negotiation is - // performed on the first request, after which negotiated is set to "true" - // so that subsequent requests do not re-negotiate. 
- negotiateVersion bool + clientConfig // negotiated indicates that API version negotiation took place negotiated atomic.Bool @@ -140,8 +130,6 @@ type Client struct { // negotiateLock is used to single-flight the version negotiation process negotiateLock sync.Mutex - traceOpts []otelhttp.Option - // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). // Store the original transport as the http.Client transport will be wrapped with tracing libs. baseTransport *http.Transport @@ -172,7 +160,14 @@ func CheckRedirect(_ *http.Request, via []*http.Request) error { return ErrRedirect } -// NewClientWithOpts initializes a new API client with a default HTTPClient, and +// NewClientWithOpts initializes a new API client. +// +// Deprecated: use New. This function will be removed in the next release. +func NewClientWithOpts(ops ...Opt) (*Client, error) { + return New(ops...) +} + +// New initializes a new API client with a default HTTPClient, and // default API host and version. It also initializes the custom HTTP headers to // add to each request. // @@ -182,11 +177,11 @@ func CheckRedirect(_ *http.Request, via []*http.Request) error { // itself with values from environment variables ([FromEnv]), and has automatic // API version negotiation enabled ([WithAPIVersionNegotiation]). // -// cli, err := client.NewClientWithOpts( +// cli, err := client.New( // client.FromEnv, // client.WithAPIVersionNegotiation(), // ) -func NewClientWithOpts(ops ...Opt) (*Client, error) { +func New(ops ...Opt) (*Client, error) { hostURL, err := ParseHostURL(DefaultDockerHost) if err != nil { return nil, err @@ -197,21 +192,23 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { return nil, err } c := &Client{ - host: DefaultDockerHost, - version: api.DefaultVersion, - client: client, - proto: hostURL.Scheme, - addr: hostURL.Host, - - traceOpts: []otelhttp.Option{ - otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { - return req.Method + " " + req.URL.Path - }), + clientConfig: clientConfig{ + host: DefaultDockerHost, + version: MaxAPIVersion, + client: client, + proto: hostURL.Scheme, + addr: hostURL.Host, + traceOpts: []otelhttp.Option{ + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + }, }, } + cfg := &c.clientConfig for _, op := range ops { - if err := op(c); err != nil { + if err := op(cfg); err != nil { return nil, err } } @@ -281,23 +278,13 @@ func (cli *Client) Close() error { // be negotiated when making the actual requests, and for which cases // we cannot do the negotiation lazily. func (cli *Client) checkVersion(ctx context.Context) error { - if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() { - // Ensure exclusive write access to version and negotiated fields - cli.negotiateLock.Lock() - defer cli.negotiateLock.Unlock() - - // May have been set during last execution of critical zone - if cli.negotiated.Load() { - return nil - } - - ping, err := cli.Ping(ctx) - if err != nil { - return err - } - cli.negotiateAPIVersionPing(ping) + if cli.manualOverride || !cli.negotiateVersion || cli.negotiated.Load() { + return nil } - return nil + _, err := cli.Ping(ctx, PingOptions{ + NegotiateAPIVersion: true, + }) + return err } // getAPIPath returns the versioned request path to call the API. 
@@ -318,75 +305,27 @@ func (cli *Client) ClientVersion() string { return cli.version } -// NegotiateAPIVersion queries the API and updates the version to match the API -// version. NegotiateAPIVersion downgrades the client's API version to match the -// APIVersion if the ping version is lower than the default version. If the API -// version reported by the server is higher than the maximum version supported -// by the client, it uses the client's maximum version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized -// with a fixed version ([WithVersion]), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, or if the -// client did not get a successful ping response, it assumes it is connected with -// an old daemon that does not support API version negotiation, in which case it -// downgrades to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - if !cli.manualOverride { - // Avoid concurrent modification of version-related fields - cli.negotiateLock.Lock() - defer cli.negotiateLock.Unlock() - - ping, err := cli.Ping(ctx) - if err != nil { - // FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead returning it. - return - } - cli.negotiateAPIVersionPing(ping) - } -} - -// NegotiateAPIVersionPing downgrades the client's API version to match the -// APIVersion in the ping response. If the API version in pingResponse is higher -// than the maximum version supported by the client, it uses the client's maximum -// version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized -// with a fixed version ([WithVersion]), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, we assume -// we are connected with an old daemon without API version negotiation support, -// and downgrade to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { - if !cli.manualOverride { - // Avoid concurrent modification of version-related fields - cli.negotiateLock.Lock() - defer cli.negotiateLock.Unlock() - - cli.negotiateAPIVersionPing(pingResponse) - } -} - -// negotiateAPIVersionPing queries the API and updates the version to match the -// API version from the ping response. -func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { - // default to the latest version before versioning headers existed - if pingResponse.APIVersion == "" { - pingResponse.APIVersion = fallbackAPIVersion +// negotiateAPIVersion updates the version to match the API version from +// the ping response. It falls back to the lowest version supported if the +// API version is empty, or returns an error if the API version is lower than +// the lowest supported API version, in which case the version is not modified. 
+func (cli *Client) negotiateAPIVersion(pingVersion string) error { + pingVersion = strings.TrimPrefix(pingVersion, "v") + if pingVersion == "" { + // TODO(thaJeztah): consider returning an error on empty value or not falling back; see https://github.com/moby/moby/pull/51119#discussion_r2413148487 + pingVersion = fallbackAPIVersion + } else if versions.LessThan(pingVersion, fallbackAPIVersion) { + return cerrdefs.ErrInvalidArgument.WithMessage(fmt.Sprintf("API version %s is not supported by this client: the minimum supported API version is %s", pingVersion, fallbackAPIVersion)) } // if the client is not initialized with a version, start with the latest supported version if cli.version == "" { - cli.version = api.DefaultVersion + cli.version = MaxAPIVersion } // if server version is lower than the client version, downgrade - if versions.LessThan(pingResponse.APIVersion, cli.version) { - cli.version = pingResponse.APIVersion + if versions.LessThan(pingVersion, cli.version) { + cli.version = pingVersion } // Store the results, so that automatic API version negotiation (if enabled) @@ -394,6 +333,7 @@ func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { if cli.negotiateVersion { cli.negotiated.Store(true) } + return nil } // DaemonHost returns the host address used by the client @@ -401,18 +341,12 @@ func (cli *Client) DaemonHost() string { return cli.host } -// HTTPClient returns a copy of the HTTP client bound to the server -func (cli *Client) HTTPClient() *http.Client { - c := *cli.client - return &c -} - // ParseHostURL parses a url string, validates the string is a host url, and // returns the parsed URL func ParseHostURL(host string) (*url.URL, error) { proto, addr, ok := strings.Cut(host, "://") if !ok || addr == "" { - return nil, errors.Errorf("unable to parse docker host `%s`", host) + return nil, fmt.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -463,7 +397,9 @@ func (cli *Client) dialer() func(context.Context) (net.Conn, error) { case "unix": return net.Dial(cli.proto, cli.addr) case "npipe": - return sockets.DialPipe(cli.addr, 32*time.Second) + ctx, cancel := context.WithTimeout(ctx, 32*time.Second) + defer cancel() + return dialPipeContext(ctx, cli.addr) default: if tlsConfig := cli.tlsConfig(); tlsConfig != nil { return tls.Dial(cli.proto, cli.addr, tlsConfig) diff --git a/vendor/github.com/moby/moby/client/client_interfaces.go b/vendor/github.com/moby/moby/client/client_interfaces.go new file mode 100644 index 000000000..4bbd45a6e --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_interfaces.go @@ -0,0 +1,242 @@ +package client + +import ( + "context" + "io" + "net" +) + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + stableAPIClient + CheckpointAPIClient // CheckpointAPIClient is still experimental. +} + +type stableAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + RegistrySearchClient + ExecAPIClient + ImageBuildAPIClient + ImageAPIClient + NetworkAPIClient + PluginAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + ServerVersion(ctx context.Context, options ServerVersionOptions) (ServerVersionResult, error) + HijackDialer + Dialer() func(context.Context) (net.Conn, error) + Close() error + SwarmManagementAPIClient +} + +// SwarmManagementAPIClient defines all methods for managing Swarm-specific +// objects. 
+type SwarmManagementAPIClient interface { + SwarmAPIClient + NodeAPIClient + ServiceAPIClient + TaskAPIClient + SecretAPIClient + ConfigAPIClient +} + +// HijackDialer defines methods for a hijack dialer. +type HijackDialer interface { + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) +} + +// CheckpointAPIClient defines API client methods for the checkpoints. +// +// Experimental: checkpoint and restore is still an experimental feature, +// and only available if the daemon is running with experimental features +// enabled. +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options CheckpointCreateOptions) (CheckpointCreateResult, error) + CheckpointRemove(ctx context.Context, container string, options CheckpointRemoveOptions) (CheckpointRemoveResult, error) + CheckpointList(ctx context.Context, container string, options CheckpointListOptions) (CheckpointListResult, error) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerCreate(ctx context.Context, options ContainerCreateOptions) (ContainerCreateResult, error) + ContainerInspect(ctx context.Context, container string, options ContainerInspectOptions) (ContainerInspectResult, error) + ContainerList(ctx context.Context, options ContainerListOptions) (ContainerListResult, error) + ContainerUpdate(ctx context.Context, container string, updateConfig ContainerUpdateOptions) (ContainerUpdateResult, error) + ContainerRemove(ctx context.Context, container string, options ContainerRemoveOptions) (ContainerRemoveResult, error) + ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) + + ContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (ContainerLogsResult, error) + + ContainerStart(ctx context.Context, container string, options ContainerStartOptions) (ContainerStartResult, error) + ContainerStop(ctx context.Context, container string, options ContainerStopOptions) (ContainerStopResult, error) + ContainerRestart(ctx context.Context, container string, options ContainerRestartOptions) (ContainerRestartResult, error) + ContainerPause(ctx context.Context, container string, options ContainerPauseOptions) (ContainerPauseResult, error) + ContainerUnpause(ctx context.Context, container string, options ContainerUnpauseOptions) (ContainerUnpauseResult, error) + ContainerWait(ctx context.Context, container string, options ContainerWaitOptions) ContainerWaitResult + ContainerKill(ctx context.Context, container string, options ContainerKillOptions) (ContainerKillResult, error) + + ContainerRename(ctx context.Context, container string, options ContainerRenameOptions) (ContainerRenameResult, error) + ContainerResize(ctx context.Context, container string, options ContainerResizeOptions) (ContainerResizeResult, error) + ContainerAttach(ctx context.Context, container string, options ContainerAttachOptions) (ContainerAttachResult, error) + ContainerCommit(ctx context.Context, container string, options ContainerCommitOptions) (ContainerCommitResult, error) + ContainerDiff(ctx context.Context, container string, options ContainerDiffOptions) (ContainerDiffResult, error) + ContainerExport(ctx context.Context, container string, options ContainerExportOptions) (ContainerExportResult, error) + + ContainerStats(ctx context.Context, container string, options ContainerStatsOptions) (ContainerStatsResult, error) + ContainerTop(ctx context.Context, 
container string, options ContainerTopOptions) (ContainerTopResult, error) + + ContainerStatPath(ctx context.Context, container string, options ContainerStatPathOptions) (ContainerStatPathResult, error) + CopyFromContainer(ctx context.Context, container string, options CopyFromContainerOptions) (CopyFromContainerResult, error) + CopyToContainer(ctx context.Context, container string, options CopyToContainerOptions) (CopyToContainerResult, error) +} + +type ExecAPIClient interface { + ExecCreate(ctx context.Context, container string, options ExecCreateOptions) (ExecCreateResult, error) + ExecInspect(ctx context.Context, execID string, options ExecInspectOptions) (ExecInspectResult, error) + ExecResize(ctx context.Context, execID string, options ExecResizeOptions) (ExecResizeResult, error) + + ExecStart(ctx context.Context, execID string, options ExecStartOptions) (ExecStartResult, error) + ExecAttach(ctx context.Context, execID string, options ExecAttachOptions) (ExecAttachResult, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image string, options DistributionInspectOptions) (DistributionInspectResult, error) +} + +type RegistrySearchClient interface { + ImageSearch(ctx context.Context, term string, options ImageSearchOptions) (ImageSearchResult, error) +} + +// ImageBuildAPIClient defines API client methods for building images +// using the REST API. +type ImageBuildAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options ImageBuildOptions) (ImageBuildResult, error) + BuildCachePrune(ctx context.Context, opts BuildCachePruneOptions) (BuildCachePruneResult, error) + BuildCancel(ctx context.Context, id string, opts BuildCancelOptions) (BuildCancelResult, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageImport(ctx context.Context, source ImageImportSource, ref string, options ImageImportOptions) (ImageImportResult, error) + + ImageList(ctx context.Context, options ImageListOptions) (ImageListResult, error) + ImagePull(ctx context.Context, ref string, options ImagePullOptions) (ImagePullResponse, error) + ImagePush(ctx context.Context, ref string, options ImagePushOptions) (ImagePushResponse, error) + ImageRemove(ctx context.Context, image string, options ImageRemoveOptions) (ImageRemoveResult, error) + ImageTag(ctx context.Context, options ImageTagOptions) (ImageTagResult, error) + ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) + + ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (ImageInspectResult, error) + ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) (ImageHistoryResult, error) + + ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (ImageLoadResult, error) + ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (ImageSaveResult, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkCreate(ctx context.Context, name string, options NetworkCreateOptions) (NetworkCreateResult, error) + NetworkInspect(ctx context.Context, network string, options NetworkInspectOptions) (NetworkInspectResult, error) + NetworkList(ctx context.Context, options NetworkListOptions) (NetworkListResult, error) + NetworkRemove(ctx context.Context, network string, options NetworkRemoveOptions) (NetworkRemoveResult, error) + 
NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) + + NetworkConnect(ctx context.Context, network string, options NetworkConnectOptions) (NetworkConnectResult, error) + NetworkDisconnect(ctx context.Context, network string, options NetworkDisconnectOptions) (NetworkDisconnectResult, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspect(ctx context.Context, nodeID string, options NodeInspectOptions) (NodeInspectResult, error) + NodeList(ctx context.Context, options NodeListOptions) (NodeListResult, error) + NodeUpdate(ctx context.Context, nodeID string, options NodeUpdateOptions) (NodeUpdateResult, error) + NodeRemove(ctx context.Context, nodeID string, options NodeRemoveOptions) (NodeRemoveResult, error) +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginCreate(ctx context.Context, createContext io.Reader, options PluginCreateOptions) (PluginCreateResult, error) + PluginInstall(ctx context.Context, name string, options PluginInstallOptions) (PluginInstallResult, error) + PluginInspect(ctx context.Context, name string, options PluginInspectOptions) (PluginInspectResult, error) + PluginList(ctx context.Context, options PluginListOptions) (PluginListResult, error) + PluginRemove(ctx context.Context, name string, options PluginRemoveOptions) (PluginRemoveResult, error) + + PluginEnable(ctx context.Context, name string, options PluginEnableOptions) (PluginEnableResult, error) + PluginDisable(ctx context.Context, name string, options PluginDisableOptions) (PluginDisableResult, error) + PluginUpgrade(ctx context.Context, name string, options PluginUpgradeOptions) (PluginUpgradeResult, error) + PluginPush(ctx context.Context, name string, options PluginPushOptions) (PluginPushResult, error) + PluginSet(ctx context.Context, name string, options PluginSetOptions) (PluginSetResult, error) +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, options ServiceCreateOptions) (ServiceCreateResult, error) + ServiceInspect(ctx context.Context, serviceID string, options ServiceInspectOptions) (ServiceInspectResult, error) + ServiceList(ctx context.Context, options ServiceListOptions) (ServiceListResult, error) + ServiceUpdate(ctx context.Context, serviceID string, options ServiceUpdateOptions) (ServiceUpdateResult, error) + ServiceRemove(ctx context.Context, serviceID string, options ServiceRemoveOptions) (ServiceRemoveResult, error) + + ServiceLogs(ctx context.Context, serviceID string, options ServiceLogsOptions) (ServiceLogsResult, error) +} + +// TaskAPIClient defines API client methods to manage swarm tasks. 
+type TaskAPIClient interface { + TaskInspect(ctx context.Context, taskID string, options TaskInspectOptions) (TaskInspectResult, error) + TaskList(ctx context.Context, options TaskListOptions) (TaskListResult, error) + + TaskLogs(ctx context.Context, taskID string, options TaskLogsOptions) (TaskLogsResult, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, options SwarmInitOptions) (SwarmInitResult, error) + SwarmJoin(ctx context.Context, options SwarmJoinOptions) (SwarmJoinResult, error) + SwarmInspect(ctx context.Context, options SwarmInspectOptions) (SwarmInspectResult, error) + SwarmUpdate(ctx context.Context, options SwarmUpdateOptions) (SwarmUpdateResult, error) + SwarmLeave(ctx context.Context, options SwarmLeaveOptions) (SwarmLeaveResult, error) + + SwarmGetUnlockKey(ctx context.Context) (SwarmGetUnlockKeyResult, error) + SwarmUnlock(ctx context.Context, options SwarmUnlockOptions) (SwarmUnlockResult, error) +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options EventsListOptions) EventsResult + Info(ctx context.Context, options InfoOptions) (SystemInfoResult, error) + RegistryLogin(ctx context.Context, auth RegistryLoginOptions) (RegistryLoginResult, error) + DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error) + Ping(ctx context.Context, options PingOptions) (PingResult, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options VolumeCreateOptions) (VolumeCreateResult, error) + VolumeInspect(ctx context.Context, volumeID string, options VolumeInspectOptions) (VolumeInspectResult, error) + VolumeList(ctx context.Context, options VolumeListOptions) (VolumeListResult, error) + VolumeUpdate(ctx context.Context, volumeID string, options VolumeUpdateOptions) (VolumeUpdateResult, error) + VolumeRemove(ctx context.Context, volumeID string, options VolumeRemoveOptions) (VolumeRemoveResult, error) + VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretCreate(ctx context.Context, options SecretCreateOptions) (SecretCreateResult, error) + SecretInspect(ctx context.Context, id string, options SecretInspectOptions) (SecretInspectResult, error) + SecretList(ctx context.Context, options SecretListOptions) (SecretListResult, error) + SecretUpdate(ctx context.Context, id string, options SecretUpdateOptions) (SecretUpdateResult, error) + SecretRemove(ctx context.Context, id string, options SecretRemoveOptions) (SecretRemoveResult, error) +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigCreate(ctx context.Context, options ConfigCreateOptions) (ConfigCreateResult, error) + ConfigInspect(ctx context.Context, id string, options ConfigInspectOptions) (ConfigInspectResult, error) + ConfigList(ctx context.Context, options ConfigListOptions) (ConfigListResult, error) + ConfigUpdate(ctx context.Context, id string, options ConfigUpdateOptions) (ConfigUpdateResult, error) + ConfigRemove(ctx context.Context, id string, options ConfigRemoveOptions) (ConfigRemoveResult, error) +} diff --git a/vendor/github.com/moby/moby/client/client_unix.go b/vendor/github.com/moby/moby/client/client_unix.go new file mode 100644 index 
000000000..1fb9fbfb9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_unix.go @@ -0,0 +1,18 @@ +//go:build !windows + +package client + +import ( + "context" + "net" + "syscall" +) + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "unix:///var/run/docker.sock" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. +func dialPipeContext(_ context.Context, _ string) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/moby/moby/client/client_windows.go b/vendor/github.com/moby/moby/client/client_windows.go new file mode 100644 index 000000000..b471c0612 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_windows.go @@ -0,0 +1,17 @@ +package client + +import ( + "context" + "net" + + "github.com/Microsoft/go-winio" +) + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. +func dialPipeContext(ctx context.Context, addr string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) +} diff --git a/vendor/github.com/moby/moby/client/config_create.go b/vendor/github.com/moby/moby/client/config_create.go new file mode 100644 index 000000000..874e2c947 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigCreateOptions holds options for creating a config. +type ConfigCreateOptions struct { + Spec swarm.ConfigSpec +} + +// ConfigCreateResult holds the result from the ConfigCreate method. +type ConfigCreateResult struct { + ID string +} + +// ConfigCreate creates a new config. +func (cli *Client) ConfigCreate(ctx context.Context, options ConfigCreateOptions) (ConfigCreateResult, error) { + resp, err := cli.post(ctx, "/configs/create", nil, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigCreateResult{}, err + } + + var out swarm.ConfigCreateResponse + err = json.NewDecoder(resp.Body).Decode(&out) + if err != nil { + return ConfigCreateResult{}, err + } + return ConfigCreateResult{ID: out.ID}, nil +} diff --git a/vendor/github.com/moby/moby/client/config_inspect.go b/vendor/github.com/moby/moby/client/config_inspect.go new file mode 100644 index 000000000..0bf0ff791 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigInspectOptions holds options for inspecting a config. +type ConfigInspectOptions struct { + // Add future optional parameters here +} + +// ConfigInspectResult holds the result from the ConfigInspect method. 
+type ConfigInspectResult struct { + Config swarm.Config + Raw json.RawMessage +} + +// ConfigInspect returns the config information with raw data +func (cli *Client) ConfigInspect(ctx context.Context, id string, options ConfigInspectOptions) (ConfigInspectResult, error) { + id, err := trimID("config", id) + if err != nil { + return ConfigInspectResult{}, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + return ConfigInspectResult{}, err + } + + var out ConfigInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Config) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/config_list.go b/vendor/github.com/moby/moby/client/config_list.go new file mode 100644 index 000000000..ee5e7fee7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters Filters +} + +// ConfigListResult holds the result from the [client.ConfigList] method. +type ConfigListResult struct { + Items []swarm.Config +} + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options ConfigListOptions) (ConfigListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigListResult{}, err + } + + var out ConfigListResult + err = json.NewDecoder(resp.Body).Decode(&out.Items) + if err != nil { + return ConfigListResult{}, err + } + return out, nil +} diff --git a/vendor/github.com/moby/moby/client/config_remove.go b/vendor/github.com/moby/moby/client/config_remove.go new file mode 100644 index 000000000..c77a4c378 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_remove.go @@ -0,0 +1,25 @@ +package client + +import "context" + +type ConfigRemoveOptions struct { + // Add future optional parameters here +} + +type ConfigRemoveResult struct { + // Add future fields here +} + +// ConfigRemove removes a config. +func (cli *Client) ConfigRemove(ctx context.Context, id string, options ConfigRemoveOptions) (ConfigRemoveResult, error) { + id, err := trimID("config", id) + if err != nil { + return ConfigRemoveResult{}, err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigRemoveResult{}, err + } + return ConfigRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/config_update.go b/vendor/github.com/moby/moby/client/config_update.go new file mode 100644 index 000000000..2651f4b2f --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_update.go @@ -0,0 +1,32 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigUpdateOptions holds options for updating a config. 
+type ConfigUpdateOptions struct { + Version swarm.Version + Spec swarm.ConfigSpec +} + +type ConfigUpdateResult struct{} + +// ConfigUpdate attempts to update a config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, options ConfigUpdateOptions) (ConfigUpdateResult, error) { + id, err := trimID("config", id) + if err != nil { + return ConfigUpdateResult{}, err + } + query := url.Values{} + query.Set("version", options.Version.String()) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigUpdateResult{}, err + } + return ConfigUpdateResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_attach.go b/vendor/github.com/moby/moby/client/container_attach.go new file mode 100644 index 000000000..fa3a2efcc --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_attach.go @@ -0,0 +1,86 @@ +package client + +import ( + "context" + "net/http" + "net/url" +) + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerAttachResult is the result from attaching to a container. +type ContainerAttachResult struct { + HijackedResponse +} + +// ContainerAttach attaches a connection to a container in the server. +// It returns a [HijackedResponse] with the hijacked connection +// and a reader to get output. It's up to the called to close +// the hijacked connection by calling [HijackedResponse.Close]. +// +// The stream format on the response uses one of two formats: +// +// - If the container is using a TTY, there is only a single stream (stdout) +// and data is copied directly from the container output stream, no extra +// multiplexing or headers. +// - If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// +// The format of the multiplexed stream is defined in the [stdcopy] package, +// and as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for [Stdout] and 2 for [Stderr]. Refer to [stdcopy.StdType] +// for details. SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded +// as big endian, this is the size of OUTPUT. You can use [stdcopy.StdCopy] +// to demultiplex this stream. 
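To make the attach flow concrete, here is a minimal usage sketch (not part of the vendored code). It assumes a *client.Client named cli constructed elsewhere, that stdcopy.StdCopy keeps its familiar (dstout, dsterr, src) signature, and that HijackedResponse exposes a Reader field as the docker client's type did; none of these are shown in this hunk.

```go
package example

import (
	"context"
	"os"

	"github.com/moby/moby/api/pkg/stdcopy"
	"github.com/moby/moby/client"
)

// attachAndCopy attaches to a running (non-TTY) container and demultiplexes
// its output stream back into stdout and stderr.
func attachAndCopy(ctx context.Context, cli *client.Client, containerID string) error {
	res, err := cli.ContainerAttach(ctx, containerID, client.ContainerAttachOptions{
		Stream: true, // keep the connection open and stream output
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		return err
	}
	defer res.Close() // release the hijacked connection

	// res.Reader is assumed to carry the multiplexed stream described above.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, res.Reader)
	return err
}
```

For a TTY-enabled container the stream is not multiplexed, and a plain io.Copy of the reader is enough.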
+// +// [stdcopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +// [stdcopy.StdType]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdType +// [Stdout]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stdout +// [Stderr]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stderr +func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options ContainerAttachOptions) (ContainerAttachResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerAttachResult{}, err + } + + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + hijacked, err := cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{ + "Content-Type": {"text/plain"}, + }) + if err != nil { + return ContainerAttachResult{}, err + } + + return ContainerAttachResult{HijackedResponse: hijacked}, nil +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/moby/moby/client/container_commit.go similarity index 55% rename from vendor/github.com/docker/docker/client/container_commit.go rename to vendor/github.com/moby/moby/client/container_commit.go index 4838ac71d..79da44a54 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/moby/moby/client/container_commit.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -7,25 +7,40 @@ import ( "net/url" "github.com/distribution/reference" - "github.com/docker/docker/api/types/container" + "github.com/moby/moby/api/types/container" ) +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + NoPause bool // NoPause disables pausing the container during commit. + Config *container.Config +} + +// ContainerCommitResult is the result from committing a container. +type ContainerCommitResult struct { + ID string +} + // ContainerCommit applies changes to a container and creates a new tagged image. 
-func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options container.CommitOptions) (container.CommitResponse, error) { +func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options ContainerCommitOptions) (ContainerCommitResult, error) { containerID, err := trimID("container", containerID) if err != nil { - return container.CommitResponse{}, err + return ContainerCommitResult{}, err } var repository, tag string if options.Reference != "" { ref, err := reference.ParseNormalizedNamed(options.Reference) if err != nil { - return container.CommitResponse{}, err + return ContainerCommitResult{}, err } - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return container.CommitResponse{}, errors.New("refusing to create a tag with a digest reference") + if _, ok := ref.(reference.Digested); ok { + return ContainerCommitResult{}, errors.New("refusing to create a tag with a digest reference") } ref = reference.TagNameOnly(ref) @@ -44,7 +59,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti for _, change := range options.Changes { query.Add("changes", change) } - if !options.Pause { + if options.NoPause { query.Set("pause", "0") } @@ -52,9 +67,9 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti resp, err := cli.post(ctx, "/commit", query, options.Config, nil) defer ensureReaderClosed(resp) if err != nil { - return response, err + return ContainerCommitResult{}, err } err = json.NewDecoder(resp.Body).Decode(&response) - return response, err + return ContainerCommitResult{ID: response.ID}, err } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/moby/moby/client/container_copy.go similarity index 58% rename from vendor/github.com/docker/docker/client/container_copy.go rename to vendor/github.com/moby/moby/client/container_copy.go index 39584d375..f76511246 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/moby/moby/client/container_copy.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -11,37 +11,60 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/api/types/container" + "github.com/moby/moby/api/types/container" ) +type ContainerStatPathOptions struct { + Path string +} + +type ContainerStatPathResult struct { + Stat container.PathStat +} + // ContainerStatPath returns stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) { +func (cli *Client) ContainerStatPath(ctx context.Context, containerID string, options ContainerStatPathOptions) (ContainerStatPathResult, error) { containerID, err := trimID("container", containerID) if err != nil { - return container.PathStat{}, err + return ContainerStatPathResult{}, err } query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(options.Path)) // Normalize the paths used in the API. 
resp, err := cli.head(ctx, "/containers/"+containerID+"/archive", query, nil) defer ensureReaderClosed(resp) if err != nil { - return container.PathStat{}, err + return ContainerStatPathResult{}, err + } + stat, err := getContainerPathStatFromHeader(resp.Header) + if err != nil { + return ContainerStatPathResult{}, err } - return getContainerPathStatFromHeader(resp.Header) + return ContainerStatPathResult{Stat: stat}, nil } +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + DestinationPath string + Content io.Reader + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +type CopyToContainerResult struct{} + // CopyToContainer copies content into the container filesystem. // Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error { +func (cli *Client) CopyToContainer(ctx context.Context, containerID string, options CopyToContainerOptions) (CopyToContainerResult, error) { containerID, err := trimID("container", containerID) if err != nil { - return err + return CopyToContainerResult{}, err } query := url.Values{} - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(options.DestinationPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. if !options.AllowOverwriteDirWithFile { query.Set("noOverwriteDirNonDir", "true") @@ -51,29 +74,38 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str query.Set("copyUIDGID", "true") } - response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, content, nil) + response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, options.Content, nil) defer ensureReaderClosed(response) if err != nil { - return err + return CopyToContainerResult{}, err } - return nil + return CopyToContainerResult{}, nil +} + +type CopyFromContainerOptions struct { + SourcePath string +} + +type CopyFromContainerResult struct { + Content io.ReadCloser + Stat container.PathStat } // CopyFromContainer gets the content from the container and returns it as a Reader // for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) { +func (cli *Client) CopyFromContainer(ctx context.Context, containerID string, options CopyFromContainerOptions) (CopyFromContainerResult, error) { containerID, err := trimID("container", containerID) if err != nil { - return nil, container.PathStat{}, err + return CopyFromContainerResult{}, err } query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(options.SourcePath)) // Normalize the paths used in the API. resp, err := cli.get(ctx, "/containers/"+containerID+"/archive", query, nil) if err != nil { - return nil, container.PathStat{}, err + return CopyFromContainerResult{}, err } // In order to get the copy behavior right, we need to know information @@ -84,9 +116,10 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s // can be when copying a file/dir from one location to another file/dir. 
stat, err := getContainerPathStatFromHeader(resp.Header) if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + ensureReaderClosed(resp) + return CopyFromContainerResult{Stat: stat}, fmt.Errorf("unable to get resource stat from response: %s", err) } - return resp.Body, stat, err + return CopyFromContainerResult{Content: resp.Body, Stat: stat}, nil } func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) { diff --git a/vendor/github.com/moby/moby/client/container_create.go b/vendor/github.com/moby/moby/client/container_create.go new file mode 100644 index 000000000..d941a3720 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create.go @@ -0,0 +1,125 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "path" + "sort" + "strings" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/container" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerCreate creates a new container based on the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, options ContainerCreateOptions) (ContainerCreateResult, error) { + cfg := options.Config + + if cfg == nil { + cfg = &container.Config{} + } + + if options.Image != "" { + if cfg.Image != "" { + return ContainerCreateResult{}, cerrdefs.ErrInvalidArgument.WithMessage("either Image or config.Image should be set") + } + newCfg := *cfg + newCfg.Image = options.Image + cfg = &newCfg + } + + if cfg.Image == "" { + return ContainerCreateResult{}, cerrdefs.ErrInvalidArgument.WithMessage("config.Image or Image is required") + } + + var response container.CreateResponse + + if options.HostConfig != nil { + options.HostConfig.CapAdd = normalizeCapabilities(options.HostConfig.CapAdd) + options.HostConfig.CapDrop = normalizeCapabilities(options.HostConfig.CapDrop) + } + + query := url.Values{} + if options.Platform != nil { + if p := formatPlatform(*options.Platform); p != "unknown" { + query.Set("platform", p) + } + } + + if options.Name != "" { + query.Set("name", options.Name) + } + + body := container.CreateRequest{ + Config: cfg, + HostConfig: options.HostConfig, + NetworkingConfig: options.NetworkingConfig, + } + + resp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerCreateResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerCreateResult{ID: response.ID, Warnings: response.Warnings}, err +} + +// formatPlatform returns a formatted string representing platform (e.g., "linux/arm/v7"). +// +// It is a fork of [platforms.Format], and does not yet support "os.version", +// as [platforms.FormatAll] does. +// +// [platforms.Format]: https://github.com/containerd/platforms/blob/v1.0.0-rc.1/platforms.go#L309-L316 +// [platforms.FormatAll]: https://github.com/containerd/platforms/blob/v1.0.0-rc.1/platforms.go#L318-L330 +func formatPlatform(platform ocispec.Platform) string { + if platform.OS == "" { + return "unknown" + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} + +// allCapabilities is a magic value for "all capabilities" +const allCapabilities = "ALL" + +// normalizeCapabilities normalizes capabilities to their canonical form, +// removes duplicates, and sorts the results. 
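As a usage illustration of the options/result convention above, a rough sketch (not vendored code) follows. It assumes a *client.Client named cli constructed elsewhere and that container.Config keeps its familiar Cmd field; the image name is only an example.

```go
package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/api/types/container"
	"github.com/moby/moby/client"
)

// createAndStart creates a container using the Image shortcut and then starts it.
func createAndStart(ctx context.Context, cli *client.Client) (string, error) {
	created, err := cli.ContainerCreate(ctx, client.ContainerCreateOptions{
		Name:  "example",       // optional; sent as the "name" query parameter
		Image: "alpine:latest", // shortcut for Config.Image; do not set both
		Config: &container.Config{
			Cmd: []string{"echo", "hello"},
		},
	})
	if err != nil {
		return "", err
	}
	for _, w := range created.Warnings {
		fmt.Println("warning:", w)
	}

	if _, err := cli.ContainerStart(ctx, created.ID, client.ContainerStartOptions{}); err != nil {
		return "", err
	}
	return created.ID, nil
}
```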
+// +// It is similar to [caps.NormalizeLegacyCapabilities], +// but performs no validation based on supported capabilities. +// +// [caps.NormalizeLegacyCapabilities]: https://github.com/moby/moby/blob/v28.3.2/oci/caps/utils.go#L56 +func normalizeCapabilities(caps []string) []string { + var normalized []string + + unique := make(map[string]struct{}) + for _, c := range caps { + c = normalizeCap(c) + if _, ok := unique[c]; ok { + continue + } + unique[c] = struct{}{} + normalized = append(normalized, c) + } + + sort.Strings(normalized) + return normalized +} + +// normalizeCap normalizes a capability to its canonical format by upper-casing +// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL" +// magic-value. +func normalizeCap(capability string) string { + capability = strings.ToUpper(capability) + if capability == allCapabilities { + return capability + } + if !strings.HasPrefix(capability, "CAP_") { + capability = "CAP_" + capability + } + return capability +} diff --git a/vendor/github.com/moby/moby/client/container_create_opts.go b/vendor/github.com/moby/moby/client/container_create_opts.go new file mode 100644 index 000000000..8580e20d3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create_opts.go @@ -0,0 +1,25 @@ +package client + +import ( + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerCreateOptions holds parameters to create a container. +type ContainerCreateOptions struct { + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *ocispec.Platform + Name string + + // Image is a shortcut for Config.Image - only one of Image or Config.Image should be set. + Image string +} + +// ContainerCreateResult is the result from creating a container. +type ContainerCreateResult struct { + ID string + Warnings []string +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/moby/moby/client/container_diff.go similarity index 64% rename from vendor/github.com/docker/docker/client/container_diff.go rename to vendor/github.com/moby/moby/client/container_diff.go index 52401898b..ec904337e 100644 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/moby/moby/client/container_diff.go @@ -1,30 +1,30 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types/container" + "github.com/moby/moby/api/types/container" ) // ContainerDiff shows differences in a container filesystem since it was started. 
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) { +func (cli *Client) ContainerDiff(ctx context.Context, containerID string, options ContainerDiffOptions) (ContainerDiffResult, error) { containerID, err := trimID("container", containerID) if err != nil { - return nil, err + return ContainerDiffResult{}, err } resp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) defer ensureReaderClosed(resp) if err != nil { - return nil, err + return ContainerDiffResult{}, err } var changes []container.FilesystemChange err = json.NewDecoder(resp.Body).Decode(&changes) if err != nil { - return nil, err + return ContainerDiffResult{}, err } - return changes, err + return ContainerDiffResult{Changes: changes}, err } diff --git a/vendor/github.com/moby/moby/client/container_diff_opts.go b/vendor/github.com/moby/moby/client/container_diff_opts.go new file mode 100644 index 000000000..5e3c37ab4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff_opts.go @@ -0,0 +1,13 @@ +package client + +import "github.com/moby/moby/api/types/container" + +// ContainerDiffOptions holds parameters to show differences in a container filesystem. +type ContainerDiffOptions struct { + // Currently no options, but this allows for future extensibility +} + +// ContainerDiffResult is the result from showing differences in a container filesystem. +type ContainerDiffResult struct { + Changes []container.FilesystemChange +} diff --git a/vendor/github.com/moby/moby/client/container_exec.go b/vendor/github.com/moby/moby/client/container_exec.go new file mode 100644 index 000000000..1af6a4e20 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_exec.go @@ -0,0 +1,205 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/container" +) + +// ExecCreateOptions is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecCreateOptions struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + TTY bool // Attach standard streams to a tty. + ConsoleSize ConsoleSize // Initial terminal size [height, width], unused if TTY == false + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// ExecCreateResult holds the result of creating a container exec. +type ExecCreateResult struct { + ID string +} + +// ExecCreate creates a new exec configuration to run an exec process. 
+func (cli *Client) ExecCreate(ctx context.Context, containerID string, options ExecCreateOptions) (ExecCreateResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ExecCreateResult{}, err + } + + consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize) + if err != nil { + return ExecCreateResult{}, err + } + + req := container.ExecCreateRequest{ + User: options.User, + Privileged: options.Privileged, + Tty: options.TTY, + ConsoleSize: consoleSize, + AttachStdin: options.AttachStdin, + AttachStderr: options.AttachStderr, + AttachStdout: options.AttachStdout, + DetachKeys: options.DetachKeys, + Env: options.Env, + WorkingDir: options.WorkingDir, + Cmd: options.Cmd, + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecCreateResult{}, err + } + + var response container.ExecCreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ExecCreateResult{ID: response.ID}, err +} + +type ConsoleSize struct { + Height, Width uint +} + +// ExecStartOptions holds options for starting a container exec. +type ExecStartOptions struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + TTY bool + // Terminal size [height, width], unused if TTY == false + ConsoleSize ConsoleSize +} + +// ExecStartResult holds the result of starting a container exec. +type ExecStartResult struct { +} + +// ExecStart starts an exec process already created in the docker host. +func (cli *Client) ExecStart(ctx context.Context, execID string, options ExecStartOptions) (ExecStartResult, error) { + consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize) + if err != nil { + return ExecStartResult{}, err + } + + req := container.ExecStartRequest{ + Detach: options.Detach, + Tty: options.TTY, + ConsoleSize: consoleSize, + } + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, req, nil) + defer ensureReaderClosed(resp) + return ExecStartResult{}, err +} + +// ExecAttachOptions holds options for attaching to a container exec. +type ExecAttachOptions struct { + // Check if there's a tty + TTY bool + // Terminal size [height, width], unused if TTY == false + ConsoleSize ConsoleSize `json:",omitzero"` +} + +// ExecAttachResult holds the result of attaching to a container exec. +type ExecAttachResult struct { + HijackedResponse +} + +// ExecAttach attaches a connection to an exec process in the server. +// +// It returns a [HijackedResponse] with the hijacked connection +// and a reader to get output. It's up to the called to close +// the hijacked connection by calling [HijackedResponse.Close]. +// +// The stream format on the response uses one of two formats: +// +// - If the container is using a TTY, there is only a single stream (stdout) +// and data is copied directly from the container output stream, no extra +// multiplexing or headers. +// - If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// +// You can use [stdcopy.StdCopy] to demultiplex this stream. Refer to +// [Client.ContainerAttach] for details about the multiplexed stream. 
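A rough sketch of the detached exec flow built from ExecCreate, ExecStart, and ExecInspect (the latter appears a little further down in this file) may help; it is not vendored code, it assumes a *client.Client named cli constructed elsewhere, and the polling interval is arbitrary.

```go
package example

import (
	"context"
	"time"

	"github.com/moby/moby/client"
)

// runExecDetached runs a command inside a container without attaching to it,
// then polls ExecInspect until the process has exited and returns its exit code.
func runExecDetached(ctx context.Context, cli *client.Client, containerID string, cmd []string) (int, error) {
	created, err := cli.ExecCreate(ctx, containerID, client.ExecCreateOptions{
		Cmd: cmd,
	})
	if err != nil {
		return 0, err
	}

	if _, err := cli.ExecStart(ctx, created.ID, client.ExecStartOptions{Detach: true}); err != nil {
		return 0, err
	}

	for {
		ins, err := cli.ExecInspect(ctx, created.ID, client.ExecInspectOptions{})
		if err != nil {
			return 0, err
		}
		if !ins.Running {
			return ins.ExitCode, nil
		}
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-time.After(250 * time.Millisecond): // arbitrary poll interval
		}
	}
}
```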
+// +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +func (cli *Client) ExecAttach(ctx context.Context, execID string, options ExecAttachOptions) (ExecAttachResult, error) { + consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize) + if err != nil { + return ExecAttachResult{}, err + } + req := container.ExecStartRequest{ + Detach: false, + Tty: options.TTY, + ConsoleSize: consoleSize, + } + response, err := cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, req, http.Header{ + "Content-Type": {"application/json"}, + }) + return ExecAttachResult{HijackedResponse: response}, err +} + +func getConsoleSize(hasTTY bool, consoleSize ConsoleSize) (*[2]uint, error) { + if consoleSize.Height != 0 || consoleSize.Width != 0 { + if !hasTTY { + return nil, errdefs.ErrInvalidArgument.WithMessage("console size is only supported when TTY is enabled") + } + return &[2]uint{consoleSize.Height, consoleSize.Width}, nil + } + return nil, nil +} + +// ExecInspectOptions holds options for inspecting a container exec. +type ExecInspectOptions struct { +} + +// ExecInspectResult holds the result of inspecting a container exec. +// +// It provides a subset of the information included in [container.ExecInspectResponse]. +// +// TODO(thaJeztah): include all fields of [container.ExecInspectResponse] ? +type ExecInspectResult struct { + ID string + ContainerID string + Running bool + ExitCode int + PID int +} + +// ExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ExecInspect(ctx context.Context, execID string, options ExecInspectOptions) (ExecInspectResult, error) { + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecInspectResult{}, err + } + + var response container.ExecInspectResponse + err = json.NewDecoder(resp.Body).Decode(&response) + if err != nil { + return ExecInspectResult{}, err + } + + var ec int + if response.ExitCode != nil { + ec = *response.ExitCode + } + + return ExecInspectResult{ + ID: response.ID, + ContainerID: response.ContainerID, + Running: response.Running, + ExitCode: ec, + PID: response.Pid, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/container_export.go b/vendor/github.com/moby/moby/client/container_export.go new file mode 100644 index 000000000..2d33efb7d --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_export.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "io" + "net/url" +) + +// ContainerExportOptions specifies options for container export operations. +type ContainerExportOptions struct { + // Currently no options are defined for ContainerExport +} + +// ContainerExportResult represents the result of a container export operation. +type ContainerExportResult interface { + io.ReadCloser +} + +// ContainerExport retrieves the raw contents of a container +// and returns them as an [io.ReadCloser]. It's up to the caller +// to close the stream. 
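For consuming the export stream, a small sketch (not vendored code), assuming a *client.Client named cli constructed elsewhere:

```go
package example

import (
	"context"
	"io"
	"os"

	"github.com/moby/moby/client"
)

// exportToFile streams a container's filesystem (a tar archive) into a local file.
func exportToFile(ctx context.Context, cli *client.Client, containerID, path string) error {
	rc, err := cli.ContainerExport(ctx, containerID, client.ContainerExportOptions{})
	if err != nil {
		return err
	}
	defer rc.Close() // the result is an io.ReadCloser; the caller must close it

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, rc)
	return err
}
```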
+// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled. +func (cli *Client) ContainerExport(ctx context.Context, containerID string, options ContainerExportOptions) (ContainerExportResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return &containerExportResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type containerExportResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*containerExportResult)(nil) + _ ContainerExportResult = (*containerExportResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/container_inspect.go b/vendor/github.com/moby/moby/client/container_inspect.go new file mode 100644 index 000000000..4f12c4657 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_inspect.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +// ContainerInspectOptions holds options for inspecting a container using +// the [Client.ContainerInspect] method. +type ContainerInspectOptions struct { + // Size controls whether the container's filesystem size should be calculated. + // When set, the [container.InspectResponse.SizeRw] and [container.InspectResponse.SizeRootFs] + // fields in [ContainerInspectResult.Container] are populated with the result. + // + // Calculating the size can be a costly operation, and should not be used + // unless needed. + Size bool +} + +// ContainerInspectResult holds the result from the [Client.ContainerInspect] method. +type ContainerInspectResult struct { + Container container.InspectResponse + Raw json.RawMessage +} + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string, options ContainerInspectOptions) (ContainerInspectResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerInspectResult{}, err + } + + query := url.Values{} + if options.Size { + query.Set("size", "1") + } + resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + return ContainerInspectResult{}, err + } + var out ContainerInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Container) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/container_kill.go b/vendor/github.com/moby/moby/client/container_kill.go new file mode 100644 index 000000000..ae7a4ebd8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_kill.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "net/url" +) + +// ContainerKillOptions holds options for [Client.ContainerKill]. +type ContainerKillOptions struct { + // Signal (optional) is the signal to send to the container. + // If no value is set, the default (SIGKILL) is used. + Signal string `json:",omitempty"` +} + +// ContainerKillResult holds the result of [Client.ContainerKill]. +type ContainerKillResult struct { + // Add future fields here. +} + +// ContainerKill terminates the container process but does not remove the container from the docker host.
+func (cli *Client) ContainerKill(ctx context.Context, containerID string, options ContainerKillOptions) (ContainerKillResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerKillResult{}, err + } + + query := url.Values{} + if options.Signal != "" { + query.Set("signal", options.Signal) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerKillResult{}, err + } + return ContainerKillResult{}, nil +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/moby/moby/client/container_list.go similarity index 53% rename from vendor/github.com/docker/docker/client/container_list.go rename to vendor/github.com/moby/moby/client/container_list.go index 510bcdf68..fcd39c4da 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/moby/moby/client/container_list.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,12 +6,26 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" + "github.com/moby/moby/api/types/container" ) +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters Filters +} + +type ContainerListResult struct { + Items []container.Summary +} + // ContainerList returns the list of containers in the docker host. -func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) { +func (cli *Client) ContainerList(ctx context.Context, options ContainerListOptions) (ContainerListResult, error) { query := url.Values{} if options.All { @@ -34,23 +48,15 @@ func (cli *Client) ContainerList(ctx context.Context, options container.ListOpti query.Set("size", "1") } - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } + options.Filters.updateURLValues(query) resp, err := cli.get(ctx, "/containers/json", query, nil) defer ensureReaderClosed(resp) if err != nil { - return nil, err + return ContainerListResult{}, err } var containers []container.Summary err = json.NewDecoder(resp.Body).Decode(&containers) - return containers, err + return ContainerListResult{Items: containers}, err } diff --git a/vendor/github.com/moby/moby/client/container_logs.go b/vendor/github.com/moby/moby/client/container_logs.go new file mode 100644 index 000000000..636ab2212 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_logs.go @@ -0,0 +1,118 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/url" + "time" + + "github.com/moby/moby/client/internal/timestamp" +) + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerLogsResult is the result of a container logs operation. +type ContainerLogsResult interface { + io.ReadCloser +} + +// ContainerLogs returns the logs generated by a container in an [io.ReadCloser]. +// It's up to the caller to close the stream. 
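Before the format details that follow, a short usage sketch (not vendored code): it assumes a *client.Client named cli constructed elsewhere and that stdcopy.StdCopy keeps its familiar signature; for a TTY-enabled container you would io.Copy the stream directly instead.

```go
package example

import (
	"context"
	"os"
	"time"

	"github.com/moby/moby/api/pkg/stdcopy"
	"github.com/moby/moby/client"
)

// followLogs tails and follows a non-TTY container's logs for at most one
// minute; cancelling the context also closes the underlying stream.
func followLogs(parent context.Context, cli *client.Client, containerID string) error {
	ctx, cancel := context.WithTimeout(parent, time.Minute)
	defer cancel()

	logs, err := cli.ContainerLogs(ctx, containerID, client.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
		Tail:       "100",
	})
	if err != nil {
		return err
	}
	defer logs.Close()

	// StdCopy returns once the stream ends, which here happens when the
	// context times out and the reader is closed; the returned error may
	// therefore reflect the cancelled context.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, logs)
	return err
}
```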
+// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +// +// The stream format on the response uses one of two formats: +// +// - If the container is using a TTY, there is only a single stream (stdout) +// and data is copied directly from the container output stream, no extra +// multiplexing or headers. +// - If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// +// The format of the multiplexed stream is defined in the [stdcopy] package, +// and as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for [Stdout] and 2 for [Stderr]. Refer to [stdcopy.StdType] +// for details. SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded +// as big endian, this is the size of OUTPUT. You can use [stdcopy.StdCopy] +// to demultiplex this stream. +// +// [stdcopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +// [stdcopy.StdType]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdType +// [Stdout]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stdout +// [Stderr]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stderr +func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options ContainerLogsOptions) (ContainerLogsResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } + + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "since": %w`, err) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timestamp.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "until": %w`, err) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil) + if err != nil { + return nil, err + } + return &containerLogsResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type containerLogsResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*containerLogsResult)(nil) + _ ContainerLogsResult = (*containerLogsResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/container_pause.go b/vendor/github.com/moby/moby/client/container_pause.go new file mode 100644 index 000000000..07669c897 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_pause.go @@ -0,0 +1,28 @@ +package client + +import "context" + +// ContainerPauseOptions holds options for [Client.ContainerPause]. +type ContainerPauseOptions struct { + // Add future optional parameters here. +} + +// ContainerPauseResult holds the result of [Client.ContainerPause], +type ContainerPauseResult struct { + // Add future fields here. +} + +// ContainerPause pauses the main process of a given container without terminating it. 
+func (cli *Client) ContainerPause(ctx context.Context, containerID string, options ContainerPauseOptions) (ContainerPauseResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerPauseResult{}, err + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerPauseResult{}, err + } + return ContainerPauseResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_prune.go b/vendor/github.com/moby/moby/client/container_prune.go new file mode 100644 index 000000000..f826f8b6f --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_prune.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +// ContainerPruneOptions holds parameters to prune containers. +type ContainerPruneOptions struct { + Filters Filters +} + +// ContainerPruneResult holds the result from the [Client.ContainerPrune] method. +type ContainerPruneResult struct { + Report container.PruneReport +} + +// ContainerPrune requests the daemon to delete unused data +func (cli *Client) ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) { + query := url.Values{} + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerPruneResult{}, err + } + + var report container.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return ContainerPruneResult{}, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return ContainerPruneResult{Report: report}, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/moby/moby/client/container_remove.go similarity index 50% rename from vendor/github.com/docker/docker/client/container_remove.go rename to vendor/github.com/moby/moby/client/container_remove.go index 6661351a9..0fbfa05fa 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/moby/moby/client/container_remove.go @@ -1,17 +1,27 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "net/url" - - "github.com/docker/docker/api/types/container" ) +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerRemoveResult holds the result of [Client.ContainerRemove], +type ContainerRemoveResult struct { + // Add future fields here. +} + // ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error { +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options ContainerRemoveOptions) (ContainerRemoveResult, error) { containerID, err := trimID("container", containerID) if err != nil { - return err + return ContainerRemoveResult{}, err } query := url.Values{} @@ -28,5 +38,8 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) defer ensureReaderClosed(resp) - return err + if err != nil { + return ContainerRemoveResult{}, err + } + return ContainerRemoveResult{}, nil } diff --git a/vendor/github.com/moby/moby/client/container_rename.go b/vendor/github.com/moby/moby/client/container_rename.go new file mode 100644 index 000000000..7c6d515b3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_rename.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "net/url" + "strings" + + "github.com/containerd/errdefs" +) + +// ContainerRenameOptions represents the options for renaming a container. +type ContainerRenameOptions struct { + NewName string +} + +// ContainerRenameResult represents the result of a container rename operation. +type ContainerRenameResult struct { + // This struct can be expanded in the future if needed +} + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID string, options ContainerRenameOptions) (ContainerRenameResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerRenameResult{}, err + } + options.NewName = strings.TrimSpace(options.NewName) + if options.NewName == "" || strings.TrimPrefix(options.NewName, "/") == "" { + // daemons before v29.0 did not handle the canonical name ("/") well + // let's be nice and validate it here before sending + return ContainerRenameResult{}, errdefs.ErrInvalidArgument.WithMessage("new name cannot be blank") + } + + query := url.Values{} + query.Set("name", options.NewName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + defer ensureReaderClosed(resp) + return ContainerRenameResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/container_resize.go b/vendor/github.com/moby/moby/client/container_resize.go new file mode 100644 index 000000000..311a9dcf5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_resize.go @@ -0,0 +1,66 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +type ContainerResizeOptions struct { + Height uint + Width uint +} + +// ContainerResizeResult holds the result of [Client.ContainerResize], +type ContainerResizeResult struct { + // Add future fields here. +} + +// ContainerResize changes the size of the pseudo-TTY for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options ContainerResizeOptions) (ContainerResizeResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerResizeResult{}, err + } + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. 
+ query := url.Values{} + query.Set("h", strconv.FormatUint(uint64(options.Height), 10)) + query.Set("w", strconv.FormatUint(uint64(options.Width), 10)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/resize", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerResizeResult{}, err + } + return ContainerResizeResult{}, nil +} + +// ExecResizeOptions holds options for resizing a container exec TTY. +type ExecResizeOptions ContainerResizeOptions + +// ExecResizeResult holds the result of resizing a container exec TTY. +type ExecResizeResult struct { +} + +// ExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ExecResize(ctx context.Context, execID string, options ExecResizeOptions) (ExecResizeResult, error) { + execID, err := trimID("exec", execID) + if err != nil { + return ExecResizeResult{}, err + } + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. + query := url.Values{} + query.Set("h", strconv.FormatUint(uint64(options.Height), 10)) + query.Set("w", strconv.FormatUint(uint64(options.Width), 10)) + + resp, err := cli.post(ctx, "/exec/"+execID+"/resize", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecResizeResult{}, err + } + return ExecResizeResult{}, nil + +} diff --git a/vendor/github.com/moby/moby/client/container_restart.go b/vendor/github.com/moby/moby/client/container_restart.go new file mode 100644 index 000000000..e883f7589 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_restart.go @@ -0,0 +1,54 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerRestartOptions holds options for [Client.ContainerRestart]. +type ContainerRestartOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If no value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// ContainerRestartResult holds the result of [Client.ContainerRestart], +type ContainerRestartResult struct { + // Add future fields here. +} + +// ContainerRestart stops, and starts a container again. +// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout. 
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options ContainerRestartOptions) (ContainerRestartResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return ContainerRestartResult{}, err
+	}
+
+	query := url.Values{}
+	if options.Timeout != nil {
+		query.Set("t", strconv.Itoa(*options.Timeout))
+	}
+	if options.Signal != "" {
+		query.Set("signal", options.Signal)
+	}
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ContainerRestartResult{}, err
+	}
+	return ContainerRestartResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/container_start.go b/vendor/github.com/moby/moby/client/container_start.go
new file mode 100644
index 000000000..dfb821d1d
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_start.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"context"
+	"net/url"
+)
+
+// ContainerStartOptions holds options for [Client.ContainerStart].
+type ContainerStartOptions struct {
+	CheckpointID  string
+	CheckpointDir string
+}
+
+// ContainerStartResult holds the result of [Client.ContainerStart].
+type ContainerStartResult struct {
+	// Add future fields here.
+}
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options ContainerStartOptions) (ContainerStartResult, error) {
+	containerID, err := trimID("container", containerID)
+	if err != nil {
+		return ContainerStartResult{}, err
+	}
+
+	query := url.Values{}
+	if options.CheckpointID != "" {
+		query.Set("checkpoint", options.CheckpointID)
+	}
+	if options.CheckpointDir != "" {
+		query.Set("checkpoint-dir", options.CheckpointDir)
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return ContainerStartResult{}, err
+	}
+	return ContainerStartResult{}, nil
+}
diff --git a/vendor/github.com/moby/moby/client/container_stats.go b/vendor/github.com/moby/moby/client/container_stats.go
new file mode 100644
index 000000000..277769dbf
--- /dev/null
+++ b/vendor/github.com/moby/moby/client/container_stats.go
@@ -0,0 +1,75 @@
+package client
+
+import (
+	"context"
+	"io"
+	"net/url"
+)
+
+// ContainerStatsOptions holds parameters to retrieve container statistics
+// using the [Client.ContainerStats] method.
+type ContainerStatsOptions struct {
+	// Stream enables streaming [container.StatsResponse] results instead
+	// of collecting a single sample. If enabled, the client remains attached
+	// until the [ContainerStatsResult.Body] is closed or the context is
+	// cancelled.
+	Stream bool
+
+	// IncludePreviousSample asks the daemon to collect a prior sample to populate the
+	// [container.StatsResponse.PreRead] and [container.StatsResponse.PreCPUStats]
+	// fields.
+	//
+	// If set, the daemon collects two samples at a one-second interval before
+	// returning the result. The first sample populates the PreCPUStats (“previous
+	// CPU”) field, allowing delta calculations for CPU usage. If false, only
+	// a single sample is taken and returned immediately, leaving PreRead and
+	// PreCPUStats empty.
+	//
+	// This option has no effect if Stream is enabled. If Stream is enabled,
+	// [container.StatsResponse.PreCPUStats] is never populated for the first
+	// record.
+ IncludePreviousSample bool +} + +// ContainerStatsResult holds the result from [Client.ContainerStats]. +// +// It wraps an [io.ReadCloser] that provides one or more [container.StatsResponse] +// objects for a container, as produced by the "GET /containers/{id}/stats" endpoint. +// If streaming is disabled, the stream contains a single record. +type ContainerStatsResult struct { + Body io.ReadCloser +} + +// ContainerStats retrieves live resource usage statistics for the specified +// container. The caller must close the [io.ReadCloser] in the returned result +// to release associated resources. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ContainerStats(ctx context.Context, containerID string, options ContainerStatsOptions) (ContainerStatsResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStatsResult{}, err + } + + query := url.Values{} + if options.Stream { + query.Set("stream", "true") + } else { + // Note: daemons before v29.0 return an error if both set: "cannot have stream=true and one-shot=true" + // + // TODO(thaJeztah): consider making "stream=false" the default for the API as well, or using Accept Header to switch. + query.Set("stream", "false") + if !options.IncludePreviousSample { + query.Set("one-shot", "true") + } + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return ContainerStatsResult{}, err + } + + return ContainerStatsResult{ + Body: newCancelReadCloser(ctx, resp.Body), + }, nil +} diff --git a/vendor/github.com/moby/moby/client/container_stop.go b/vendor/github.com/moby/moby/client/container_stop.go new file mode 100644 index 000000000..d4d47d8fd --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stop.go @@ -0,0 +1,58 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerStopOptions holds the options for [Client.ContainerStop]. +type ContainerStopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If no value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// ContainerStopResult holds the result of [Client.ContainerStop], +type ContainerStopResult struct { + // Add future fields here. +} + +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. 
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, options ContainerStopOptions) (ContainerStopResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStopResult{}, err + } + + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerStopResult{}, err + } + return ContainerStopResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_top.go b/vendor/github.com/moby/moby/client/container_top.go new file mode 100644 index 000000000..dc0af8ae4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_top.go @@ -0,0 +1,44 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/moby/moby/api/types/container" +) + +// ContainerTopOptions defines options for container top operations. +type ContainerTopOptions struct { + Arguments []string +} + +// ContainerTopResult represents the result of a ContainerTop operation. +type ContainerTopResult struct { + Processes [][]string + Titles []string +} + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, options ContainerTopOptions) (ContainerTopResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerTopResult{}, err + } + + query := url.Values{} + if len(options.Arguments) > 0 { + query.Set("ps_args", strings.Join(options.Arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerTopResult{}, err + } + + var response container.TopResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerTopResult{Processes: response.Processes, Titles: response.Titles}, err +} diff --git a/vendor/github.com/moby/moby/client/container_unpause.go b/vendor/github.com/moby/moby/client/container_unpause.go new file mode 100644 index 000000000..627d60c96 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_unpause.go @@ -0,0 +1,28 @@ +package client + +import "context" + +// ContainerUnpauseOptions holds options for [Client.ContainerUnpause]. +type ContainerUnpauseOptions struct { + // Add future optional parameters here. +} + +// ContainerUnpauseResult holds the result of [Client.ContainerUnpause], +type ContainerUnpauseResult struct { + // Add future fields here. +} + +// ContainerUnpause resumes the process execution within a container. 
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string, options ContainerUnpauseOptions) (ContainerUnpauseResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerUnpauseResult{}, err + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerUnpauseResult{}, err + } + return ContainerUnpauseResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_update.go b/vendor/github.com/moby/moby/client/container_update.go new file mode 100644 index 000000000..a1d4d249a --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_update.go @@ -0,0 +1,46 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/container" +) + +// ContainerUpdateOptions holds options for [Client.ContainerUpdate]. +type ContainerUpdateOptions struct { + Resources *container.Resources + RestartPolicy *container.RestartPolicy +} + +// ContainerUpdateResult is the result from updating a container. +type ContainerUpdateResult struct { + // Warnings encountered when updating the container. + Warnings []string +} + +// ContainerUpdate updates the resources of a container. +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, options ContainerUpdateOptions) (ContainerUpdateResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerUpdateResult{}, err + } + + updateConfig := container.UpdateConfig{} + if options.Resources != nil { + updateConfig.Resources = *options.Resources + } + if options.RestartPolicy != nil { + updateConfig.RestartPolicy = *options.RestartPolicy + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerUpdateResult{}, err + } + + var response container.UpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerUpdateResult{Warnings: response.Warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/container_wait.go b/vendor/github.com/moby/moby/client/container_wait.go new file mode 100644 index 000000000..6f71ed051 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_wait.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + +// ContainerWaitOptions holds options for [Client.ContainerWait]. +type ContainerWaitOptions struct { + Condition container.WaitCondition +} + +// ContainerWaitResult defines the result from the [Client.ContainerWait] method. +type ContainerWaitResult struct { + Result <-chan container.WaitResponse + Error <-chan error +} + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either; +// +// - "not-running" ([container.WaitConditionNotRunning]) (default) +// - "next-exit" ([container.WaitConditionNextExit]) +// - "removed" ([container.WaitConditionRemoved]) +// +// ContainerWait blocks until the request has been acknowledged by the server +// (with a response header), then returns two channels on which the caller can +// wait for the exit status of the container or an error if there was a problem +// either beginning the wait request or in getting the response. 
This allows the +// caller to synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition ([container.WaitConditionNextExit]) before issuing a +// [Client.ContainerStart] request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, options ContainerWaitOptions) ContainerWaitResult { + resultC := make(chan container.WaitResponse) + errC := make(chan error, 1) + + containerID, err := trimID("container", containerID) + if err != nil { + errC <- err + return ContainerWaitResult{Result: resultC, Error: errC} + } + + query := url.Values{} + if options.Condition != "" { + query.Set("condition", string(options.Condition)) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return ContainerWaitResult{Result: resultC, Error: errC} + } + + go func() { + defer ensureReaderClosed(resp) + + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(resp.Body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. + // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + if errors.As(err, new(*json.SyntaxError)) { + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + } else { + errC <- err + } + return + } + + resultC <- res + }() + + return ContainerWaitResult{Result: resultC, Error: errC} +} diff --git a/vendor/github.com/moby/moby/client/distribution_inspect.go b/vendor/github.com/moby/moby/client/distribution_inspect.go new file mode 100644 index 000000000..ffbf869d3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/distribution_inspect.go @@ -0,0 +1,45 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + + "github.com/moby/moby/api/types/registry" +) + +// DistributionInspectResult holds the result of the DistributionInspect operation. +type DistributionInspectResult struct { + registry.DistributionInspect +} + +// DistributionInspectOptions holds options for the DistributionInspect operation. +type DistributionInspectOptions struct { + EncodedRegistryAuth string +} + +// DistributionInspect returns the image digest with the full manifest. 
+func (cli *Client) DistributionInspect(ctx context.Context, imageRef string, options DistributionInspectOptions) (DistributionInspectResult, error) { + if imageRef == "" { + return DistributionInspectResult{}, objectNotFoundError{object: "distribution", id: imageRef} + } + + var headers http.Header + if options.EncodedRegistryAuth != "" { + headers = http.Header{ + registry.AuthHeader: {options.EncodedRegistryAuth}, + } + } + + // Contact the registry to retrieve digest and platform information + resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) + if err != nil { + return DistributionInspectResult{}, err + } + + var distributionInspect registry.DistributionInspect + err = json.NewDecoder(resp.Body).Decode(&distributionInspect) + return DistributionInspectResult{DistributionInspect: distributionInspect}, err +} diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/moby/moby/client/envvars.go similarity index 72% rename from vendor/github.com/docker/docker/client/envvars.go rename to vendor/github.com/moby/moby/client/envvars.go index 61dd45c1d..2b0e3f6b5 100644 --- a/vendor/github.com/docker/docker/client/envvars.go +++ b/vendor/github.com/moby/moby/client/envvars.go @@ -1,19 +1,19 @@ -package client // import "github.com/docker/docker/client" +package client const ( // EnvOverrideHost is the name of the environment variable that can be used // to override the default host to connect to (DefaultDockerHost). // - // This env-var is read by FromEnv and WithHostFromEnv and when set to a + // This env-var is read by [FromEnv] and [WithHostFromEnv] and when set to a // non-empty value, takes precedence over the default host (which is platform // specific), or any host already set. EnvOverrideHost = "DOCKER_HOST" // EnvOverrideAPIVersion is the name of the environment variable that can - // be used to override the API version to use. Value should be + // be used to override the API version to use. Value must be // formatted as MAJOR.MINOR, for example, "1.19". // - // This env-var is read by FromEnv and WithVersionFromEnv and when set to a + // This env-var is read by [FromEnv] and [WithVersionFromEnv] and when set to a // non-empty value, takes precedence over API version negotiation. // // This environment variable should be used for debugging purposes only, as @@ -23,16 +23,15 @@ const ( // EnvOverrideCertPath is the name of the environment variable that can be // used to specify the directory from which to load the TLS certificates // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure - // the Client for a TCP connection protected by TLS client authentication. + // the [Client] for a TCP connection protected by TLS client authentication. // // TLS certificate verification is enabled by default if the Client is configured - // to use a TLS connection. Refer to EnvTLSVerify below to learn how to + // to use a TLS connection. Refer to [EnvTLSVerify] below to learn how to // disable verification for testing purposes. // // WARNING: Access to the remote API is equivalent to root access to the // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). + // and only if needed. Make sure you are familiar with the ["daemon attack surface"]. 
// // For local access to the API, it is recommended to connect with the daemon // using the default local socket connection (on Linux), or the named pipe @@ -43,11 +42,14 @@ const ( // configuration if the host is accessible using ssh. // // If you cannot use the alternatives above, and you must expose the API over - // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ + // a TCP connection. Refer to [Protect the Docker daemon socket] // to learn how to configure the daemon and client to use a TCP connection // with TLS client authentication. Make sure you know the differences between // a regular TLS connection and a TLS connection protected by TLS client // authentication, and verify that the API cannot be accessed by other clients. + // + // ["daemon attack surface"]: https://docs.docker.com/go/attack-surface/ + // [Protect the Docker daemon socket]: https://docs.docker.com/engine/security/protect-access/ EnvOverrideCertPath = "DOCKER_CERT_PATH" // EnvTLSVerify is the name of the environment variable that can be used to @@ -59,26 +61,26 @@ const ( // // WARNING: Access to the remote API is equivalent to root access to the // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). + // and only if needed. Make sure you are familiar with the ["daemon attack surface"]. // // Before setting up your client and daemon to use a TCP connection with TLS // client authentication, consider using one of the alternatives mentioned - // in EnvOverrideCertPath above. + // in [EnvOverrideCertPath]. // // Disabling TLS certificate verification (for testing purposes) // // TLS certificate verification is enabled by default if the Client is configured // to use a TLS connection, and it is highly recommended to keep verification - // enabled to prevent machine-in-the-middle attacks. Refer to the documentation - // at https://docs.docker.com/engine/security/protect-access/ and pages linked - // from that page to learn how to configure the daemon and client to use a - // TCP connection with TLS client authentication enabled. + // enabled to prevent machine-in-the-middle attacks. Refer to [Protect the Docker daemon socket] + // in the documentation and pages linked from that page to learn how to + // configure the daemon and client to use a TCP connection with TLS client + // authentication enabled. // // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to // disable TLS certificate verification. Disabling verification is insecure, - // so should only be done for testing purposes. From the Go documentation - // (https://pkg.go.dev/crypto/tls#Config): + // so should only be done for testing purposes. + // + // From the[crypto/tls.Config] documentation: // // InsecureSkipVerify controls whether a client verifies the server's // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls @@ -86,5 +88,8 @@ const ( // certificate. In this mode, TLS is susceptible to machine-in-the-middle // attacks unless custom verification is used. This should be used only for // testing or in combination with VerifyConnection or VerifyPeerCertificate. 
+ // + // ["daemon attack surface"]: https://docs.docker.com/go/attack-surface/ + // [Protect the Docker daemon socket]: https://docs.docker.com/engine/security/protect-access/ EnvTLSVerify = "DOCKER_TLS_VERIFY" ) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/moby/moby/client/errors.go similarity index 75% rename from vendor/github.com/docker/docker/client/errors.go rename to vendor/github.com/moby/moby/client/errors.go index 7bd859314..9fbfa7666 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/moby/moby/client/errors.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -8,7 +8,7 @@ import ( cerrdefs "github.com/containerd/errdefs" "github.com/containerd/errdefs/pkg/errhttp" - "github.com/docker/docker/api/types/versions" + "github.com/moby/moby/client/pkg/versions" ) // errConnectionFailed implements an error returned when connection failed. @@ -30,13 +30,6 @@ func IsErrConnectionFailed(err error) bool { return errors.As(err, &errConnectionFailed{}) } -// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. -// -// Deprecated: this function was only used internally, and will be removed in the next release. -func ErrorConnectionFailed(host string) error { - return connectionFailed(host) -} - // connectionFailed returns an error with host in the error message when connection // to docker daemon failed. func connectionFailed(host string) error { @@ -49,14 +42,6 @@ func connectionFailed(host string) error { return errConnectionFailed{error: err} } -// IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. It is an alias for [cerrdefs.IsNotFound]. -// -// Deprecated: use [cerrdefs.IsNotFound] instead. -func IsErrNotFound(err error) bool { - return cerrdefs.IsNotFound(err) -} - type objectNotFoundError struct { object string id string @@ -68,12 +53,12 @@ func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -// NewVersionError returns an error if the APIVersion required is less than the +// requiresVersion returns an error if the APIVersion required is less than the // current supported version. // // It performs API-version negotiation if the Client is configured with this // option, otherwise it assumes the latest API version is used. -func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature string) error { +func (cli *Client) requiresVersion(ctx context.Context, apiRequired, feature string) error { // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. 
// @@ -82,8 +67,8 @@ func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature str if err := cli.checkVersion(ctx); err != nil { return err } - if cli.version != "" && versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + if cli.version != "" && versions.LessThan(cli.version, apiRequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, apiRequired, cli.version) } return nil } diff --git a/vendor/github.com/moby/moby/client/filters.go b/vendor/github.com/moby/moby/client/filters.go new file mode 100644 index 000000000..347ad5c68 --- /dev/null +++ b/vendor/github.com/moby/moby/client/filters.go @@ -0,0 +1,59 @@ +package client + +import ( + "encoding/json" + "net/url" +) + +// Filters describes a predicate for an API request. +// +// Each entry in the map is a filter term. +// Each term is evaluated against the set of values. +// A filter term is satisfied if any one of the values in the set is a match. +// An item matches the filters when all terms are satisfied. +// +// Like all other map types in Go, the zero value is empty and read-only. +type Filters map[string]map[string]bool + +// Add appends values to the value-set of term. +// +// The receiver f is returned for chaining. +// +// f := make(Filters).Add("name", "foo", "bar").Add("status", "exited") +func (f Filters) Add(term string, values ...string) Filters { + if _, ok := f[term]; !ok { + f[term] = make(map[string]bool) + } + for _, v := range values { + f[term][v] = true + } + return f +} + +// Clone returns a deep copy of f. +func (f Filters) Clone() Filters { + out := make(Filters, len(f)) + for term, values := range f { + inner := make(map[string]bool, len(values)) + for v, ok := range values { + inner[v] = ok + } + out[term] = inner + } + return out +} + +// updateURLValues sets the "filters" key in values to the marshalled value of +// f, replacing any existing values. When f is empty, any existing "filters" key +// is removed. +func (f Filters) updateURLValues(values url.Values) { + if len(f) > 0 { + b, err := json.Marshal(f) + if err != nil { + panic(err) // Marshaling builtin types should never fail + } + values.Set("filters", string(b)) + } else { + values.Del("filters") + } +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/moby/moby/client/hijack.go similarity index 65% rename from vendor/github.com/docker/docker/client/hijack.go rename to vendor/github.com/moby/moby/client/hijack.go index 2c78fad00..31c44e598 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/moby/moby/client/hijack.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bufio" @@ -9,38 +9,30 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) // postHijacked sends a POST request and hijacks the connection. 
-func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body any, headers map[string][]string) (HijackedResponse, error) { + jsonBody, err := jsonEncode(body) if err != nil { - return types.HijackedResponse{}, err + return HijackedResponse{}, err } - req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) + req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), jsonBody, headers) if err != nil { - return types.HijackedResponse{}, err + return HijackedResponse{}, err } conn, mediaType, err := setupHijackConn(cli.dialer(), req, "tcp") if err != nil { - return types.HijackedResponse{}, err + return HijackedResponse{}, err } - if versions.LessThan(cli.ClientVersion(), "1.42") { - // Prior to 1.42, Content-Type is always set to raw-stream and not relevant - mediaType = "" - } - - return types.NewHijackedResponse(conn, mediaType), nil + return NewHijackedResponse(conn, mediaType), nil } // DialHijack returns a hijacked connection with negotiated protocol proto. func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, http.NoBody) if err != nil { return nil, err } @@ -57,18 +49,18 @@ func setupHijackConn(dialer func(context.Context) (net.Conn, error), req *http.R conn, err := dialer(ctx) if err != nil { - return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + return nil, "", fmt.Errorf("cannot connect to the Docker daemon. Is 'docker daemon' running on this host?: %w", err) } defer func() { if retErr != nil { - conn.Close() + _ = conn.Close() } }() // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit + // state. Setting TCP KeepAlive on the socket connection prohibits // ECONNTIMEOUT unless the socket connection truly is broken if tcpConn, ok := conn.(*net.TCPConn); ok { _ = tcpConn.SetKeepAlive(true) @@ -91,7 +83,7 @@ func setupHijackConn(dialer func(context.Context) (net.Conn, error), req *http.R // If there is buffered content, wrap the connection. We return an // object that implements CloseWrite if the underlying connection // implements it. - if _, ok := hc.Conn.(types.CloseWriter); ok { + if _, ok := hc.Conn.(CloseWriter); ok { conn = &hijackedConnCloseWriter{hc} } else { conn = hc @@ -131,9 +123,50 @@ type hijackedConnCloseWriter struct { *hijackedConn } -var _ types.CloseWriter = &hijackedConnCloseWriter{} +var _ CloseWriter = &hijackedConnCloseWriter{} func (c *hijackedConnCloseWriter) CloseWrite() error { - conn := c.Conn.(types.CloseWriter) + conn := c.Conn.(CloseWriter) return conn.CloseWrite() } + +// NewHijackedResponse initializes a [HijackedResponse] type. 
+func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
+	return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+	mediaType string
+	Conn      net.Conn
+	Reader    *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+	h.Conn.Close()
+}
+
+// MediaType lets the client know whether the HijackedResponse holds a raw or multiplexed stream.
+// It returns false if the HTTP Content-Type is not relevant, and the container must be
+// inspected.
+func (h *HijackedResponse) MediaType() (string, bool) {
+	if h.mediaType == "" {
+		return "", false
+	}
+	return h.mediaType, true
+}
+
+// CloseWriter is an interface implemented by structs
+// that can close input streams to prevent further writing.
+type CloseWriter interface {
+	CloseWrite() error
+}
+
+// CloseWrite closes a readWriter for writing.
+func (h *HijackedResponse) CloseWrite() error {
+	if conn, ok := h.Conn.(CloseWriter); ok {
+		return conn.CloseWrite()
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/moby/moby/client/image_build.go
similarity index 79%
rename from vendor/github.com/docker/docker/client/image_build.go
rename to vendor/github.com/moby/moby/client/image_build.go
index 28b74a3f1..5062ec5de 100644
--- a/vendor/github.com/docker/docker/client/image_build.go
+++ b/vendor/github.com/moby/moby/client/image_build.go
@@ -1,4 +1,4 @@
-package client // import "github.com/docker/docker/client"
+package client
 
 import (
 	"context"
@@ -8,25 +8,24 @@ import (
 	"net/http"
 	"net/url"
 	"strconv"
-	"strings"
 
-	"github.com/docker/docker/api/types/build"
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/network"
+	cerrdefs "github.com/containerd/errdefs"
+	"github.com/moby/moby/api/types/container"
+	"github.com/moby/moby/api/types/network"
 )
 
 // ImageBuild sends a request to the daemon to build images.
-// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// The Body in the response implements an [io.ReadCloser] and it's up to the caller to
 // close it.
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) { +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options ImageBuildOptions) (ImageBuildResult, error) { query, err := cli.imageBuildOptionsToQuery(ctx, options) if err != nil { - return build.ImageBuildResponse{}, err + return ImageBuildResult{}, err } buf, err := json.Marshal(options.AuthConfigs) if err != nil { - return build.ImageBuildResponse{}, err + return ImageBuildResult{}, err } headers := http.Header{} @@ -35,16 +34,15 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { - return build.ImageBuildResponse{}, err + return ImageBuildResult{}, err } - return build.ImageBuildResponse{ - Body: resp.Body, - OSType: getDockerOS(resp.Header.Get("Server")), + return ImageBuildResult{ + Body: resp.Body, }, nil } -func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options build.ImageBuildOptions) (url.Values, error) { +func (cli *Client) imageBuildOptionsToQuery(_ context.Context, options ImageBuildOptions) (url.Values, error) { query := url.Values{} if len(options.Tags) > 0 { query["t"] = options.Tags @@ -81,9 +79,7 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options build.I } if options.Squash { - if err := cli.NewVersionError(ctx, "1.25", "squash"); err != nil { - return query, err - } + // TODO(thaJeztah): squash is experimental, and deprecated when using BuildKit? query.Set("squash", "1") } @@ -158,11 +154,12 @@ func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options build.I if options.SessionID != "" { query.Set("session", options.SessionID) } - if options.Platform != "" { - if err := cli.NewVersionError(ctx, "1.32", "platform"); err != nil { - return query, err + if len(options.Platforms) > 0 { + if len(options.Platforms) > 1 { + // TODO(thaJeztah): update API spec and add equivalent check on the daemon. We need this still for older daemons, which would ignore it. 
+ return query, cerrdefs.ErrInvalidArgument.WithMessage("specifying multiple platforms is not yet supported") } - query.Set("platform", strings.ToLower(options.Platform)) + query.Set("platform", formatPlatform(options.Platforms[0])) } if options.BuildID != "" { query.Set("buildid", options.BuildID) diff --git a/vendor/github.com/docker/docker/api/types/build/build.go b/vendor/github.com/moby/moby/client/image_build_opts.go similarity index 75% rename from vendor/github.com/docker/docker/api/types/build/build.go rename to vendor/github.com/moby/moby/client/image_build_opts.go index c43a0e21e..f65ad0f2b 100644 --- a/vendor/github.com/docker/docker/api/types/build/build.go +++ b/vendor/github.com/moby/moby/client/image_build_opts.go @@ -1,27 +1,14 @@ -package build +package client import ( "io" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/registry" + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/registry" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// Result contains the image id of a successful build. -type Result struct { - ID string -} - // ImageBuildOptions holds the information // necessary to build images. type ImageBuildOptions struct { @@ -64,9 +51,11 @@ type ImageBuildOptions struct { ExtraHosts []string // List of extra hosts Target string SessionID string - Platform string + // Platforms selects the platforms to build the image for. Multiple platforms + // can be provided if the daemon supports multi-platform builds. + Platforms []ocispec.Platform // Version specifies the version of the underlying builder to use - Version BuilderVersion + Version build.BuilderVersion // BuildID is an optional identifier that can be passed together with the // build request. The same identifier can be used to gracefully cancel the // build with the cancel request. @@ -82,10 +71,9 @@ type ImageBuildOutput struct { Attrs map[string]string } -// ImageBuildResponse holds information +// ImageBuildResult holds information // returned by a server after building // an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string +type ImageBuildResult struct { + Body io.ReadCloser } diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/moby/moby/client/image_history.go similarity index 72% rename from vendor/github.com/docker/docker/client/image_history.go rename to vendor/github.com/moby/moby/client/image_history.go index 49381fb83..8618f1553 100644 --- a/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/moby/moby/client/image_history.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,7 +6,6 @@ import ( "fmt" "net/url" - "github.com/docker/docker/api/types/image" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -22,24 +21,24 @@ func ImageHistoryWithPlatform(platform ocispec.Platform) ImageHistoryOption { } // ImageHistory returns the changes in an image in history format. 
-func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts ...ImageHistoryOption) ([]image.HistoryResponseItem, error) { +func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts ...ImageHistoryOption) (ImageHistoryResult, error) { query := url.Values{} var opts imageHistoryOpts for _, o := range historyOpts { if err := o.Apply(&opts); err != nil { - return nil, err + return ImageHistoryResult{}, err } } if opts.apiOptions.Platform != nil { - if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { - return nil, err + if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil { + return ImageHistoryResult{}, err } p, err := encodePlatform(opts.apiOptions.Platform) if err != nil { - return nil, err + return ImageHistoryResult{}, err } query.Set("platform", p) } @@ -47,10 +46,10 @@ func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts resp, err := cli.get(ctx, "/images/"+imageID+"/history", query, nil) defer ensureReaderClosed(resp) if err != nil { - return nil, err + return ImageHistoryResult{}, err } - var history []image.HistoryResponseItem - err = json.NewDecoder(resp.Body).Decode(&history) + var history ImageHistoryResult + err = json.NewDecoder(resp.Body).Decode(&history.Items) return history, err } diff --git a/vendor/github.com/docker/docker/client/image_history_opts.go b/vendor/github.com/moby/moby/client/image_history_opts.go similarity index 53% rename from vendor/github.com/docker/docker/client/image_history_opts.go rename to vendor/github.com/moby/moby/client/image_history_opts.go index 6d3494dd0..7fc57afd1 100644 --- a/vendor/github.com/docker/docker/client/image_history_opts.go +++ b/vendor/github.com/moby/moby/client/image_history_opts.go @@ -1,7 +1,8 @@ package client import ( - "github.com/docker/docker/api/types/image" + "github.com/moby/moby/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageHistoryOption is a type representing functional options for the image history operation. @@ -15,5 +16,14 @@ func (f imageHistoryOptionFunc) Apply(o *imageHistoryOpts) error { } type imageHistoryOpts struct { - apiOptions image.HistoryOptions + apiOptions imageHistoryOptions +} + +type imageHistoryOptions struct { + // Platform from the manifest list to use for history. + Platform *ocispec.Platform +} + +type ImageHistoryResult struct { + Items []image.HistoryResponseItem } diff --git a/vendor/github.com/moby/moby/client/image_import.go b/vendor/github.com/moby/moby/client/image_import.go new file mode 100644 index 000000000..f383f76d4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import.go @@ -0,0 +1,66 @@ +package client + +import ( + "context" + "io" + "net/url" + + "github.com/distribution/reference" +) + +// ImageImportResult holds the response body returned by the daemon for image import. +type ImageImportResult interface { + io.ReadCloser +} + +// ImageImport creates a new image based on the source options. It returns the +// JSON content in the [ImageImportResult]. 
+// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ImageImport(ctx context.Context, source ImageImportSource, ref string, options ImageImportOptions) (ImageImportResult, error) { + if ref != "" { + // Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + if source.SourceName != "" { + query.Set("fromSrc", source.SourceName) + } + if ref != "" { + query.Set("repo", ref) + } + if options.Tag != "" { + query.Set("tag", options.Tag) + } + if options.Message != "" { + query.Set("message", options.Message) + } + if p := formatPlatform(options.Platform); p != "unknown" { + // TODO(thaJeztah): would we ever support mutiple platforms here? (would require multiple rootfs tars as well?) + query.Set("platform", p) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return &imageImportResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +// ImageImportResult holds the response body returned by the daemon for image import. +type imageImportResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*imageImportResult)(nil) + _ ImageImportResult = (*imageImportResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/image_import_opts.go b/vendor/github.com/moby/moby/client/image_import_opts.go new file mode 100644 index 000000000..c70473bdd --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import_opts.go @@ -0,0 +1,21 @@ +package client + +import ( + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform ocispec.Platform // Platform is the target platform of the image +} diff --git a/vendor/github.com/moby/moby/client/image_inspect.go b/vendor/github.com/moby/moby/client/image_inspect.go new file mode 100644 index 000000000..635931fd0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect.go @@ -0,0 +1,62 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/url" +) + +// ImageInspect returns the image information. 
+func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts ...ImageInspectOption) (ImageInspectResult, error) { + if imageID == "" { + return ImageInspectResult{}, objectNotFoundError{object: "image", id: imageID} + } + + var opts imageInspectOpts + for _, opt := range inspectOpts { + if err := opt.Apply(&opts); err != nil { + return ImageInspectResult{}, fmt.Errorf("error applying image inspect option: %w", err) + } + } + + query := url.Values{} + if opts.apiOptions.Manifests { + if err := cli.requiresVersion(ctx, "1.48", "manifests"); err != nil { + return ImageInspectResult{}, err + } + query.Set("manifests", "1") + } + + if opts.apiOptions.Platform != nil { + if err := cli.requiresVersion(ctx, "1.49", "platform"); err != nil { + return ImageInspectResult{}, err + } + platform, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return ImageInspectResult{}, err + } + query.Set("platform", platform) + } + + resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageInspectResult{}, err + } + + buf := opts.raw + if buf == nil { + buf = &bytes.Buffer{} + } + + if _, err := io.Copy(buf, resp.Body); err != nil { + return ImageInspectResult{}, err + } + + var response ImageInspectResult + err = json.Unmarshal(buf.Bytes(), &response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/image_inspect_opts.go b/vendor/github.com/moby/moby/client/image_inspect_opts.go similarity index 70% rename from vendor/github.com/docker/docker/client/image_inspect_opts.go rename to vendor/github.com/moby/moby/client/image_inspect_opts.go index 655cbf0b7..266c1fe81 100644 --- a/vendor/github.com/docker/docker/client/image_inspect_opts.go +++ b/vendor/github.com/moby/moby/client/image_inspect_opts.go @@ -3,7 +3,7 @@ package client import ( "bytes" - "github.com/docker/docker/api/types/image" + "github.com/moby/moby/api/types/image" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -28,8 +28,9 @@ func ImageInspectWithRawResponse(raw *bytes.Buffer) ImageInspectOption { // ImageInspectWithManifests sets manifests API option for the image inspect operation. // This option is only available for API version 1.48 and up. -// With this option set, the image inspect operation response will have the -// [image.InspectResponse.Manifests] field populated if the server is multi-platform capable. +// With this option set, the image inspect operation response includes +// the [image.InspectResponse.Manifests] field if the server is multi-platform +// capable. func ImageInspectWithManifests(manifests bool) ImageInspectOption { return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { clientOpts.apiOptions.Manifests = manifests @@ -39,7 +40,7 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption { // ImageInspectWithPlatform sets platform API option for the image inspect operation. // This option is only available for API version 1.49 and up. -// With this option set, the image inspect operation will return information for the +// With this option set, the image inspect operation returns information for the // specified platform variant of the multi-platform image. 
func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption { return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { @@ -48,15 +49,21 @@ func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption { }) } -// ImageInspectWithAPIOpts sets the API options for the image inspect operation. -func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption { - return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { - clientOpts.apiOptions = opts - return nil - }) -} - type imageInspectOpts struct { raw *bytes.Buffer - apiOptions image.InspectOptions + apiOptions imageInspectOptions +} + +type imageInspectOptions struct { + // Manifests returns the image manifests. + Manifests bool + + // Platform selects the specific platform of a multi-platform image to inspect. + // + // This option is only available for API version 1.49 and up. + Platform *ocispec.Platform +} + +type ImageInspectResult struct { + image.InspectResponse } diff --git a/vendor/github.com/moby/moby/client/image_list.go b/vendor/github.com/moby/moby/client/image_list.go new file mode 100644 index 000000000..6df3c66e1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list.go @@ -0,0 +1,53 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/client/pkg/versions" +) + +// ImageList returns a list of images in the docker host. +// +// Experimental: Set the [image.ListOptions.Manifest] option +// to include [image.Summary.Manifests] with information about image manifests. +// This is experimental and might change in the future without any backward +// compatibility. +func (cli *Client) ImageList(ctx context.Context, options ImageListOptions) (ImageListResult, error) { + var images []image.Summary + + query := url.Values{} + + options.Filters.updateURLValues(query) + if options.All { + query.Set("all", "1") + } + if options.SharedSize { + query.Set("shared-size", "1") + } + if options.Manifests { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + if err := cli.checkVersion(ctx); err != nil { + return ImageListResult{}, err + } + + if versions.GreaterThanOrEqualTo(cli.version, "1.47") { + query.Set("manifests", "1") + } + } + + resp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageListResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&images) + return ImageListResult{Items: images}, err +} diff --git a/vendor/github.com/moby/moby/client/image_list_opts.go b/vendor/github.com/moby/moby/client/image_list_opts.go new file mode 100644 index 000000000..a497d5790 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list_opts.go @@ -0,0 +1,24 @@ +package client + +import "github.com/moby/moby/api/types/image" + +// ImageListOptions holds parameters to list images with. +type ImageListOptions struct { + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. + Filters Filters + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // Manifests indicates whether the image manifests should be returned. 
+ Manifests bool +} + +// ImageListResult holds the result from ImageList. +type ImageListResult struct { + Items []image.Summary +} diff --git a/vendor/github.com/moby/moby/client/image_load.go b/vendor/github.com/moby/moby/client/image_load.go new file mode 100644 index 000000000..ec5fcae6e --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_load.go @@ -0,0 +1,64 @@ +package client + +import ( + "context" + "io" + "net/http" + "net/url" +) + +// ImageLoadResult returns information to the client about a load process. +// It implements [io.ReadCloser] and must be closed to avoid a resource leak. +type ImageLoadResult interface { + io.ReadCloser +} + +// ImageLoad loads an image in the docker host from the client host. It's up +// to the caller to close the [ImageLoadResult] returned by this function. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (ImageLoadResult, error) { + var opts imageLoadOpts + for _, opt := range loadOpts { + if err := opt.Apply(&opts); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("quiet", "0") + if opts.apiOptions.Quiet { + query.Set("quiet", "1") + } + if len(opts.apiOptions.Platforms) > 0 { + if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil { + return nil, err + } + + p, err := encodePlatforms(opts.apiOptions.Platforms...) + if err != nil { + return nil, err + } + query["platform"] = p + } + + resp, err := cli.postRaw(ctx, "/images/load", query, input, http.Header{ + "Content-Type": {"application/x-tar"}, + }) + if err != nil { + return nil, err + } + return &imageLoadResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +// imageLoadResult returns information to the client about a load process. +type imageLoadResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*imageLoadResult)(nil) + _ ImageLoadResult = (*imageLoadResult)(nil) +) diff --git a/vendor/github.com/docker/docker/client/image_load_opts.go b/vendor/github.com/moby/moby/client/image_load_opts.go similarity index 69% rename from vendor/github.com/docker/docker/client/image_load_opts.go rename to vendor/github.com/moby/moby/client/image_load_opts.go index ebcedd41f..aeb4fcf83 100644 --- a/vendor/github.com/docker/docker/client/image_load_opts.go +++ b/vendor/github.com/moby/moby/client/image_load_opts.go @@ -3,7 +3,6 @@ package client import ( "fmt" - "github.com/docker/docker/api/types/image" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -18,7 +17,16 @@ func (f imageLoadOptionFunc) Apply(o *imageLoadOpts) error { } type imageLoadOpts struct { - apiOptions image.LoadOptions + apiOptions imageLoadOptions +} + +type imageLoadOptions struct { + // Quiet suppresses progress output + Quiet bool + + // Platforms selects the platforms to load if the image is a + // multi-platform image and has multiple variants. + Platforms []ocispec.Platform } // ImageLoadWithQuiet sets the quiet option for the image load operation. @@ -30,6 +38,10 @@ func ImageLoadWithQuiet(quiet bool) ImageLoadOption { } // ImageLoadWithPlatforms sets the platforms to be loaded from the image. +// +// Platform is an optional parameter that specifies the platform to load from +// the provided multi-platform image. Passing a platform only has an effect +// if the input image is a multi-platform image. 
func ImageLoadWithPlatforms(platforms ...ocispec.Platform) ImageLoadOption { return imageLoadOptionFunc(func(opt *imageLoadOpts) error { if opt.apiOptions.Platforms != nil { diff --git a/vendor/github.com/moby/moby/client/image_prune.go b/vendor/github.com/moby/moby/client/image_prune.go new file mode 100644 index 000000000..7f3a25b89 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_prune.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/image" +) + +// ImagePruneOptions holds parameters to prune images. +type ImagePruneOptions struct { + Filters Filters +} + +// ImagePruneResult holds the result from the [Client.ImagePrune] method. +type ImagePruneResult struct { + Report image.PruneReport +} + +// ImagePrune requests the daemon to delete unused data +func (cli *Client) ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) { + query := url.Values{} + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImagePruneResult{}, err + } + + var report image.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return ImagePruneResult{}, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return ImagePruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/image_pull.go b/vendor/github.com/moby/moby/client/image_pull.go new file mode 100644 index 000000000..11c0afa41 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull.go @@ -0,0 +1,93 @@ +package client + +import ( + "context" + "io" + "iter" + "net/http" + "net/url" + + cerrdefs "github.com/containerd/errdefs" + "github.com/distribution/reference" + "github.com/moby/moby/api/types/jsonstream" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/client/internal" +) + +type ImagePullResponse interface { + io.ReadCloser + JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error] + Wait(ctx context.Context) error +} + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// Callers can: +// - use [ImagePullResponse.Wait] to wait for pull to complete +// - use [ImagePullResponse.JSONMessages] to monitor pull progress as a sequence +// of JSONMessages, [ImagePullResponse.Close] does not need to be called in this case. +// - use the [io.Reader] interface and call [ImagePullResponse.Close] after processing. +func (cli *Client) ImagePull(ctx context.Context, refStr string, options ImagePullOptions) (ImagePullResponse, error) { + // FIXME(vdemeester): there is currently used in a few way in docker/docker + // - if not in trusted content, ref is used to pass the whole reference, and tag is empty + // - if in trusted content, ref is used to pass the reference name, and tag for the digest + // + // ref; https://github.com/docker-archive-public/docker.engine-api/pull/162 + + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", ref.Name()) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if len(options.Platforms) > 0 { + if len(options.Platforms) > 1 { + // TODO(thaJeztah): update API spec and add equivalent check on the daemon. We need this still for older daemons, which would ignore it. 
+ return nil, cerrdefs.ErrInvalidArgument.WithMessage("specifying multiple platforms is not yet supported") + } + query.Set("platform", formatPlatform(options.Platforms[0])) + } + resp, err := cli.tryImageCreate(ctx, query, staticAuth(options.RegistryAuth)) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + resp, err = cli.tryImageCreate(ctx, query, options.PrivilegeFunc) + } + if err != nil { + return nil, err + } + + return internal.NewJSONMessageStream(resp.Body), nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, resolveAuth registry.RequestAuthConfig) (*http.Response, error) { + hdr := http.Header{} + if resolveAuth != nil { + registryAuth, err := resolveAuth(ctx) + if err != nil { + return nil, err + } + if registryAuth != "" { + hdr.Set(registry.AuthHeader, registryAuth) + } + } + return cli.post(ctx, "/images/create", query, nil, hdr) +} diff --git a/vendor/github.com/moby/moby/client/image_pull_opts.go b/vendor/github.com/moby/moby/client/image_pull_opts.go new file mode 100644 index 000000000..1b78185dd --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull_opts.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + + // Platforms selects the platforms to pull. Multiple platforms can be + // specified if the image ia a multi-platform image. + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/moby/moby/client/image_push.go b/vendor/github.com/moby/moby/client/image_push.go new file mode 100644 index 000000000..5dd8bc140 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push.go @@ -0,0 +1,98 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "iter" + "net/http" + "net/url" + + cerrdefs "github.com/containerd/errdefs" + "github.com/distribution/reference" + "github.com/moby/moby/api/types/jsonstream" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/client/internal" +) + +type ImagePushResponse interface { + io.ReadCloser + JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error] + Wait(ctx context.Context) error +} + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. 
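// Illustrative usage sketch (not part of the vendored file): pulling an image
// with the ImagePull API above. "alpine:latest" is a placeholder reference.
// Wait consumes the JSON message stream and returns any error it reports;
// alternatively the JSONMessages iterator can be ranged over for progress.
package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/moby/moby/client"
)

func pullImage(ctx context.Context, cli *client.Client) error {
	resp, err := cli.ImagePull(ctx, "alpine:latest", client.ImagePullOptions{})
	if err != nil {
		return err
	}
	// Close is not required when Wait (or JSONMessages) drains the stream.
	return resp.Wait(ctx)
}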
+// Callers can +// - use [ImagePushResponse.Wait] to wait for push to complete +// - use [ImagePushResponse.JSONMessages] to monitor pull progress as a sequence +// of JSONMessages, [ImagePushResponse.Close] does not need to be called in this case. +// - use the [io.Reader] interface and call [ImagePushResponse.Close] after processing. +func (cli *Client) ImagePush(ctx context.Context, image string, options ImagePushOptions) (ImagePushResponse, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, ok := ref.(reference.Digested); ok { + return nil, errors.New("cannot push a digest reference") + } + + query := url.Values{} + if !options.All { + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + } + + if options.Platform != nil { + if err := cli.requiresVersion(ctx, "1.46", "platform"); err != nil { + return nil, err + } + + p := *options.Platform + pJson, err := json.Marshal(p) + if err != nil { + return nil, fmt.Errorf("invalid platform: %v", err) + } + + query.Set("platform", string(pJson)) + } + + resp, err := cli.tryImagePush(ctx, ref.Name(), query, staticAuth(options.RegistryAuth)) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + resp, err = cli.tryImagePush(ctx, ref.Name(), query, options.PrivilegeFunc) + } + if err != nil { + return nil, err + } + return internal.NewJSONMessageStream(resp.Body), nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, resolveAuth registry.RequestAuthConfig) (*http.Response, error) { + hdr := http.Header{} + if resolveAuth != nil { + registryAuth, err := resolveAuth(ctx) + if err != nil { + return nil, err + } + if registryAuth != "" { + hdr.Set(registry.AuthHeader, registryAuth) + } + } + + // Always send a body (which may be an empty JSON document ("{}")) to prevent + // EOF errors on older daemons which had faulty fallback code for handling + // authentication in the body when no auth-header was set, resulting in; + // + // Error response from daemon: bad parameters and missing X-Registry-Auth: invalid X-Registry-Auth header: EOF + // + // We use [http.NoBody], which gets marshaled to an empty JSON document. + // + // see: https://github.com/moby/moby/commit/ea29dffaa541289591aa44fa85d2a596ce860e16 + return cli.post(ctx, "/images/"+imageID+"/push", query, http.NoBody, hdr) +} diff --git a/vendor/github.com/moby/moby/client/image_push_opts.go b/vendor/github.com/moby/moby/client/image_push_opts.go new file mode 100644 index 000000000..591c6b605 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push_opts.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImagePushOptions holds information to push images. +type ImagePushOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + + // Platform is an optional field that selects a specific platform to push + // when the image is a multi-platform image. 
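// Illustrative usage sketch (not part of the vendored file): pushing an image
// with the ImagePush API above. The reference and the registryAuth parameter
// (base64-encoded credentials, as documented on ImagePushOptions) are
// placeholders supplied by the caller.
package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/moby/moby/client"
)

func pushImage(ctx context.Context, cli *client.Client, registryAuth string) error {
	resp, err := cli.ImagePush(ctx, "registry.example.com/mirrors/alpine:latest", client.ImagePushOptions{
		RegistryAuth: registryAuth,
	})
	if err != nil {
		return err
	}
	return resp.Wait(ctx)
}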
+ // Using this will only push a single platform-specific manifest. + Platform *ocispec.Platform `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/moby/moby/client/image_remove.go similarity index 73% rename from vendor/github.com/docker/docker/client/image_remove.go rename to vendor/github.com/moby/moby/client/image_remove.go index 0d769139b..095b4f04c 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/moby/moby/client/image_remove.go @@ -1,15 +1,15 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types/image" + "github.com/moby/moby/api/types/image" ) // ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options image.RemoveOptions) ([]image.DeleteResponse, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options ImageRemoveOptions) (ImageRemoveResult, error) { query := url.Values{} if options.Force { @@ -22,7 +22,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options imag if len(options.Platforms) > 0 { p, err := encodePlatforms(options.Platforms...) if err != nil { - return nil, err + return ImageRemoveResult{}, err } query["platforms"] = p } @@ -30,10 +30,10 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options imag resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return nil, err + return ImageRemoveResult{}, err } var dels []image.DeleteResponse err = json.NewDecoder(resp.Body).Decode(&dels) - return dels, err + return ImageRemoveResult{Items: dels}, err } diff --git a/vendor/github.com/moby/moby/client/image_remove_opts.go b/vendor/github.com/moby/moby/client/image_remove_opts.go new file mode 100644 index 000000000..3b5d8a77f --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove_opts.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/moby/moby/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Platforms []ocispec.Platform + Force bool + PruneChildren bool +} + +// ImageRemoveResult holds the delete responses returned by the daemon. +type ImageRemoveResult struct { + Items []image.DeleteResponse +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/moby/moby/client/image_save.go similarity index 50% rename from vendor/github.com/docker/docker/client/image_save.go rename to vendor/github.com/moby/moby/client/image_save.go index 0aa7177d2..508f88b7d 100644 --- a/vendor/github.com/docker/docker/client/image_save.go +++ b/vendor/github.com/moby/moby/client/image_save.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -6,11 +6,18 @@ import ( "net/url" ) -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +type ImageSaveResult interface { + io.ReadCloser +} + +// ImageSave retrieves one or more images from the docker host as an +// [ImageSaveResult]. Callers should close the reader, but the underlying +// [io.ReadCloser] is automatically closed if the context is canceled, // -// Platforms is an optional parameter that specifies the platforms to save from the image. 
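// Illustrative usage sketch (not part of the vendored file): the ImageRemove
// method now takes client.ImageRemoveOptions and returns an ImageRemoveResult
// instead of a bare []image.DeleteResponse. "alpine:latest" is a placeholder.
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func removeImage(ctx context.Context, cli *client.Client) error {
	res, err := cli.ImageRemove(ctx, "alpine:latest", client.ImageRemoveOptions{
		Force:         false,
		PruneChildren: true,
	})
	if err != nil {
		return err
	}
	fmt.Printf("%d delete records returned\n", len(res.Items))
	return nil
}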
-// This is only has effect if the input image is a multi-platform image. -func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ...ImageSaveOption) (io.ReadCloser, error) { +// Platforms is an optional parameter that specifies the platforms to save +// from the image. Passing a platform only has an effect if the input image +// is a multi-platform image. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ...ImageSaveOption) (ImageSaveResult, error) { var opts imageSaveOpts for _, opt := range saveOpts { if err := opt.Apply(&opts); err != nil { @@ -23,7 +30,7 @@ func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts .. } if len(opts.apiOptions.Platforms) > 0 { - if err := cli.NewVersionError(ctx, "1.48", "platform"); err != nil { + if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil { return nil, err } p, err := encodePlatforms(opts.apiOptions.Platforms...) @@ -37,5 +44,16 @@ func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts .. if err != nil { return nil, err } - return resp.Body, nil + return &imageSaveResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type imageSaveResult struct { + io.ReadCloser } + +var ( + _ io.ReadCloser = (*imageSaveResult)(nil) + _ ImageSaveResult = (*imageSaveResult)(nil) +) diff --git a/vendor/github.com/docker/docker/client/image_save_opts.go b/vendor/github.com/moby/moby/client/image_save_opts.go similarity index 60% rename from vendor/github.com/docker/docker/client/image_save_opts.go rename to vendor/github.com/moby/moby/client/image_save_opts.go index acd8f282b..9c0b3b74a 100644 --- a/vendor/github.com/docker/docker/client/image_save_opts.go +++ b/vendor/github.com/moby/moby/client/image_save_opts.go @@ -3,7 +3,6 @@ package client import ( "fmt" - "github.com/docker/docker/api/types/image" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -17,8 +16,11 @@ func (f imageSaveOptionFunc) Apply(o *imageSaveOpts) error { return f(o) } -// ImageSaveWithPlatforms sets the platforms to be saved from the image. +// ImageSaveWithPlatforms sets the platforms to be saved from the image. It +// produces an error if platforms are already set. This option only has an +// effect if the input image is a multi-platform image. func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption { + // TODO(thaJeztah): verify the GoDoc; do we produce an error for a single-platform image without the given platform? return imageSaveOptionFunc(func(opt *imageSaveOpts) error { if opt.apiOptions.Platforms != nil { return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms) @@ -29,5 +31,11 @@ func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption { } type imageSaveOpts struct { - apiOptions image.SaveOptions + apiOptions imageSaveOptions +} + +type imageSaveOptions struct { + // Platforms selects the platforms to save if the image is a + // multi-platform image and has multiple variants. 
+ Platforms []ocispec.Platform } diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/moby/moby/client/image_search.go similarity index 70% rename from vendor/github.com/docker/docker/client/image_search.go rename to vendor/github.com/moby/moby/client/image_search.go index f3aab43a8..6e280906a 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/moby/moby/client/image_search.go @@ -1,4 +1,4 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" @@ -8,13 +8,12 @@ import ( "strconv" cerrdefs "github.com/containerd/errdefs" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" + "github.com/moby/moby/api/types/registry" ) // ImageSearch makes the docker host search by a term in a remote registry. // The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) { +func (cli *Client) ImageSearch(ctx context.Context, term string, options ImageSearchOptions) (ImageSearchResult, error) { var results []registry.SearchResult query := url.Values{} query.Set("term", term) @@ -22,29 +21,23 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options registr query.Set("limit", strconv.Itoa(options.Limit)) } - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } + options.Filters.updateURLValues(query) resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) defer ensureReaderClosed(resp) if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { - return results, privilegeErr + return ImageSearchResult{}, privilegeErr } resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) } if err != nil { - return results, err + return ImageSearchResult{}, err } err = json.NewDecoder(resp.Body).Decode(&results) - return results, err + return ImageSearchResult{Items: results}, err } func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { diff --git a/vendor/github.com/moby/moby/client/image_search_opts.go b/vendor/github.com/moby/moby/client/image_search_opts.go new file mode 100644 index 000000000..95a7d41fa --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search_opts.go @@ -0,0 +1,27 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/registry" +) + +// ImageSearchResult wraps results returned by ImageSearch. +type ImageSearchResult struct { + Items []registry.SearchResult +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. 
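// Illustrative usage sketch (not part of the vendored file): saving a single
// platform of a multi-platform image with the functional option shown above.
// The image reference and the linux/amd64 platform literal are placeholders;
// ocispec.Platform comes from the OCI image-spec module already imported here.
package example // hypothetical package, for illustration only

import (
	"context"
	"io"

	"github.com/moby/moby/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func saveLinuxAmd64(ctx context.Context, cli *client.Client, w io.Writer) error {
	rc, err := cli.ImageSave(ctx, []string{"alpine:latest"},
		client.ImageSaveWithPlatforms(ocispec.Platform{OS: "linux", Architecture: "amd64"}))
	if err != nil {
		return err
	}
	defer rc.Close()
	// The returned ImageSaveResult is an io.ReadCloser; it is also closed
	// automatically if ctx is canceled, per the GoDoc above.
	_, err = io.Copy(w, rc)
	return err
}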
+ PrivilegeFunc func(context.Context) (string, error) + Filters Filters + Limit int +} diff --git a/vendor/github.com/moby/moby/client/image_tag.go b/vendor/github.com/moby/moby/client/image_tag.go new file mode 100644 index 000000000..5566f4624 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_tag.go @@ -0,0 +1,48 @@ +package client + +import ( + "context" + "errors" + "fmt" + "net/url" + + "github.com/distribution/reference" +) + +type ImageTagOptions struct { + Source string + Target string +} + +type ImageTagResult struct{} + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, options ImageTagOptions) (ImageTagResult, error) { + source := options.Source + target := options.Target + + if _, err := reference.ParseAnyReference(source); err != nil { + return ImageTagResult{}, fmt.Errorf("error parsing reference: %q is not a valid repository/tag: %w", source, err) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return ImageTagResult{}, fmt.Errorf("error parsing reference: %q is not a valid repository/tag: %w", target, err) + } + + if _, ok := ref.(reference.Digested); ok { + return ImageTagResult{}, errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", ref.Name()) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + defer ensureReaderClosed(resp) + return ImageTagResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/internal/json-stream.go b/vendor/github.com/moby/moby/client/internal/json-stream.go new file mode 100644 index 000000000..552978f9a --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/json-stream.go @@ -0,0 +1,50 @@ +package internal + +import ( + "encoding/json" + "io" + "slices" + + "github.com/moby/moby/api/types" +) + +const rs = 0x1E + +type DecoderFn func(v any) error + +// NewJSONStreamDecoder builds adequate DecoderFn to read json records formatted with specified content-type +func NewJSONStreamDecoder(r io.Reader, contentType string) DecoderFn { + switch contentType { + case types.MediaTypeJSONSequence: + return json.NewDecoder(NewRSFilterReader(r)).Decode + case types.MediaTypeJSON, types.MediaTypeNDJSON: + fallthrough + default: + return json.NewDecoder(r).Decode + } +} + +// RSFilterReader wraps an io.Reader and filters out ASCII RS characters +type RSFilterReader struct { + reader io.Reader + buffer []byte +} + +// NewRSFilterReader creates a new RSFilterReader that filters out RS characters +func NewRSFilterReader(r io.Reader) *RSFilterReader { + return &RSFilterReader{ + reader: r, + buffer: make([]byte, 4096), // Internal buffer for reading chunks + } +} + +// Read implements the io.Reader interface, filtering out RS characters +func (r *RSFilterReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + n, err = r.reader.Read(p) + filtered := slices.DeleteFunc(p[:n], func(b byte) bool { return b == rs }) + return len(filtered), err +} diff --git a/vendor/github.com/moby/moby/client/internal/jsonmessages.go b/vendor/github.com/moby/moby/client/internal/jsonmessages.go new file mode 100644 index 000000000..ebbb5faa3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/jsonmessages.go @@ -0,0 +1,79 @@ +package internal + +import ( + "context" + "encoding/json" + "errors" + "io" + "iter" + "sync" + + 
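// Illustrative usage sketch (not part of the vendored file): tagging an image
// with the new single-options signature of ImageTag. Both references are
// placeholders.
package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/moby/moby/client"
)

func tagImage(ctx context.Context, cli *client.Client) error {
	_, err := cli.ImageTag(ctx, client.ImageTagOptions{
		Source: "alpine:latest",
		Target: "registry.example.com/mirrors/alpine:latest",
	})
	return err
}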
"github.com/moby/moby/api/types/jsonstream" +) + +func NewJSONMessageStream(rc io.ReadCloser) stream { + if rc == nil { + panic("nil io.ReadCloser") + } + return stream{ + rc: rc, + close: sync.OnceValue(rc.Close), + } +} + +type stream struct { + rc io.ReadCloser + close func() error +} + +// Read implements io.ReadCloser +func (r stream) Read(p []byte) (n int, err error) { + if r.rc == nil { + return 0, io.EOF + } + return r.rc.Read(p) +} + +// Close implements io.ReadCloser +func (r stream) Close() error { + if r.close == nil { + return nil + } + return r.close() +} + +// JSONMessages decodes the response stream as a sequence of JSONMessages. +// if stream ends or context is cancelled, the underlying [io.Reader] is closed. +func (r stream) JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error] { + context.AfterFunc(ctx, func() { + _ = r.Close() + }) + dec := json.NewDecoder(r) + return func(yield func(jsonstream.Message, error) bool) { + defer r.Close() + for { + var jm jsonstream.Message + err := dec.Decode(&jm) + if errors.Is(err, io.EOF) { + break + } + if ctx.Err() != nil { + yield(jm, ctx.Err()) + return + } + if !yield(jm, err) { + return + } + } + } +} + +// Wait waits for operation to complete and detects errors reported as JSONMessage +func (r stream) Wait(ctx context.Context) error { + for _, err := range r.JSONMessages(ctx) { + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go similarity index 98% rename from vendor/github.com/docker/docker/api/types/time/timestamp.go rename to vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go index edd1d6ecb..7b175f0c9 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go @@ -1,4 +1,4 @@ -package time // import "github.com/docker/docker/api/types/time" +package timestamp import ( "fmt" diff --git a/vendor/github.com/moby/moby/client/login.go b/vendor/github.com/moby/moby/client/login.go new file mode 100644 index 000000000..b295080ab --- /dev/null +++ b/vendor/github.com/moby/moby/client/login.go @@ -0,0 +1,45 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/registry" +) + +type RegistryLoginOptions struct { + Username string + Password string + ServerAddress string + IdentityToken string + RegistryToken string +} + +// RegistryLoginResult holds the result of a RegistryLogin query. +type RegistryLoginResult struct { + Auth registry.AuthResponse +} + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. 
+func (cli *Client) RegistryLogin(ctx context.Context, options RegistryLoginOptions) (RegistryLoginResult, error) { + auth := registry.AuthConfig{ + Username: options.Username, + Password: options.Password, + ServerAddress: options.ServerAddress, + IdentityToken: options.IdentityToken, + RegistryToken: options.RegistryToken, + } + + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return RegistryLoginResult{}, err + } + + var response registry.AuthResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return RegistryLoginResult{Auth: response}, err +} diff --git a/vendor/github.com/moby/moby/client/network_connect.go b/vendor/github.com/moby/moby/client/network_connect.go new file mode 100644 index 000000000..40db955a9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_connect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/network" +) + +// NetworkConnectOptions represents the data to be used to connect a container to the +// network. +type NetworkConnectOptions struct { + Container string + EndpointConfig *network.EndpointSettings +} + +// NetworkConnectResult represents the result of a NetworkConnect operation. +type NetworkConnectResult struct { + // Currently empty; placeholder for future fields. +} + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID string, options NetworkConnectOptions) (NetworkConnectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkConnectResult{}, err + } + + containerID, err := trimID("container", options.Container) + if err != nil { + return NetworkConnectResult{}, err + } + + nc := network.ConnectRequest{ + Container: containerID, + EndpointConfig: options.EndpointConfig, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + defer ensureReaderClosed(resp) + return NetworkConnectResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/network_create.go b/vendor/github.com/moby/moby/client/network_create.go new file mode 100644 index 000000000..25ea32af4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_create.go @@ -0,0 +1,69 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/network" +) + +// NetworkCreateOptions holds options to create a network. +type NetworkCreateOptions struct { + Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv4 *bool // EnableIPv4 represents whether to enable IPv4. + EnableIPv6 *bool // EnableIPv6 represents whether to enable IPv6. + IPAM *network.IPAM // IPAM is the network's IP Address Management. + Internal bool // Internal represents if the network is used internal only. + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. 
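// Illustrative usage sketch (not part of the vendored file): authenticating
// against a registry with RegistryLogin. The credentials and server address
// are placeholders, and the registry.AuthResponse layout is not shown in this
// hunk, so the response is only printed verbatim.
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func login(ctx context.Context, cli *client.Client) error {
	res, err := cli.RegistryLogin(ctx, client.RegistryLoginOptions{
		Username:      "example-user",
		Password:      "example-secret",
		ServerAddress: "registry.example.com",
	})
	if err != nil {
		return err
	}
	fmt.Printf("auth response: %+v\n", res.Auth)
	return nil
}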
+ ConfigFrom string // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. + Options map[string]string // Options specifies the network-specific options to use for when creating the network. + Labels map[string]string // Labels holds metadata specific to the network being created. +} + +// NetworkCreateResult represents the result of a network create operation. +type NetworkCreateResult struct { + ID string + + Warning []string +} + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options NetworkCreateOptions) (NetworkCreateResult, error) { + req := network.CreateRequest{ + Name: name, + Driver: options.Driver, + Scope: options.Scope, + EnableIPv4: options.EnableIPv4, + EnableIPv6: options.EnableIPv6, + IPAM: options.IPAM, + Internal: options.Internal, + Attachable: options.Attachable, + Ingress: options.Ingress, + ConfigOnly: options.ConfigOnly, + Options: options.Options, + Labels: options.Labels, + } + + if options.ConfigFrom != "" { + req.ConfigFrom = &network.ConfigReference{Network: options.ConfigFrom} + } + + resp, err := cli.post(ctx, "/networks/create", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkCreateResult{}, err + } + + var response network.CreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + + var warnings []string + if response.Warning != "" { + warnings = []string{response.Warning} + } + + return NetworkCreateResult{ID: response.ID, Warning: warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/network_disconnect.go b/vendor/github.com/moby/moby/client/network_disconnect.go new file mode 100644 index 000000000..64a1796b8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_disconnect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/network" +) + +// NetworkDisconnectOptions represents the data to be used to disconnect a container +// from the network. +type NetworkDisconnectOptions struct { + Container string + Force bool +} + +// NetworkDisconnectResult represents the result of a NetworkDisconnect operation. +type NetworkDisconnectResult struct { + // Currently empty; placeholder for future fields. +} + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID string, options NetworkDisconnectOptions) (NetworkDisconnectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkDisconnectResult{}, err + } + + containerID, err := trimID("container", options.Container) + if err != nil { + return NetworkDisconnectResult{}, err + } + + req := network.DisconnectRequest{ + Container: containerID, + Force: options.Force, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, req, nil) + defer ensureReaderClosed(resp) + return NetworkDisconnectResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect.go b/vendor/github.com/moby/moby/client/network_inspect.go new file mode 100644 index 000000000..775780527 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkInspectResult contains the result of a network inspection. 
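// Illustrative usage sketch (not part of the vendored file): creating a
// network with the options struct above. The network name, driver, and label
// are placeholders; EnableIPv6 is a *bool, so it is passed by address.
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func createNetwork(ctx context.Context, cli *client.Client) (string, error) {
	enableIPv6 := true
	res, err := cli.NetworkCreate(ctx, "app-net", client.NetworkCreateOptions{
		Driver:     "bridge",
		EnableIPv6: &enableIPv6,
		Labels:     map[string]string{"com.example.purpose": "demo"},
	})
	if err != nil {
		return "", err
	}
	for _, w := range res.Warning {
		fmt.Println("network create warning:", w)
	}
	return res.ID, nil
}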
+type NetworkInspectResult struct { + Network network.Inspect + Raw json.RawMessage +} + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options NetworkInspectOptions) (NetworkInspectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkInspectResult{}, err + } + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + + resp, err := cli.get(ctx, "/networks/"+networkID, query, nil) + if err != nil { + return NetworkInspectResult{}, err + } + + var out NetworkInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Network) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect_opts.go b/vendor/github.com/moby/moby/client/network_inspect_opts.go new file mode 100644 index 000000000..d83f113e1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect_opts.go @@ -0,0 +1,7 @@ +package client + +// NetworkInspectOptions holds parameters to inspect network. +type NetworkInspectOptions struct { + Scope string + Verbose bool +} diff --git a/vendor/github.com/moby/moby/client/network_list.go b/vendor/github.com/moby/moby/client/network_list.go new file mode 100644 index 000000000..d65f56097 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list.go @@ -0,0 +1,28 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkListResult holds the result from the [Client.NetworkList] method. +type NetworkListResult struct { + Items []network.Summary +} + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options NetworkListOptions) (NetworkListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkListResult{}, err + } + var res NetworkListResult + err = json.NewDecoder(resp.Body).Decode(&res.Items) + return res, err +} diff --git a/vendor/github.com/moby/moby/client/network_list_opts.go b/vendor/github.com/moby/moby/client/network_list_opts.go new file mode 100644 index 000000000..0d21ab313 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list_opts.go @@ -0,0 +1,6 @@ +package client + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters Filters +} diff --git a/vendor/github.com/moby/moby/client/network_prune.go b/vendor/github.com/moby/moby/client/network_prune.go new file mode 100644 index 000000000..55f7cac02 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_prune.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkPruneOptions holds parameters to prune networks. +type NetworkPruneOptions struct { + Filters Filters +} + +// NetworkPruneResult holds the result from the [Client.NetworkPrune] method. 
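// Illustrative usage sketch (not part of the vendored file): listing networks
// and inspecting one of them. "bridge" is a placeholder network name; the
// network.Summary and network.Inspect field layouts are not shown in this
// hunk, so only counts and the raw payload size are printed.
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func listAndInspectNetworks(ctx context.Context, cli *client.Client) error {
	list, err := cli.NetworkList(ctx, client.NetworkListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d networks configured\n", len(list.Items))

	res, err := cli.NetworkInspect(ctx, "bridge", client.NetworkInspectOptions{Verbose: true})
	if err != nil {
		return err
	}
	fmt.Printf("raw inspect payload: %d bytes\n", len(res.Raw))
	return nil
}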
+type NetworkPruneResult struct { + Report network.PruneReport +} + +// NetworkPrune requests the daemon to delete unused networks +func (cli *Client) NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) { + query := url.Values{} + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkPruneResult{}, err + } + + var report network.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return NetworkPruneResult{}, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return NetworkPruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/network_remove.go b/vendor/github.com/moby/moby/client/network_remove.go new file mode 100644 index 000000000..2bceb0d93 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_remove.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" +) + +// NetworkRemoveOptions specifies options for removing a network. +type NetworkRemoveOptions struct { + // No options currently; placeholder for future use. +} + +// NetworkRemoveResult represents the result of a network removal operation. +type NetworkRemoveResult struct { + // No fields currently; placeholder for future use. +} + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string, options NetworkRemoveOptions) (NetworkRemoveResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkRemoveResult{}, err + } + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return NetworkRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/node_inspect.go b/vendor/github.com/moby/moby/client/node_inspect.go new file mode 100644 index 000000000..cd4ce0119 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_inspect.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeInspectOptions holds parameters to inspect nodes with. +type NodeInspectOptions struct{} + +type NodeInspectResult struct { + Node swarm.Node + Raw json.RawMessage +} + +// NodeInspect returns the node information. +func (cli *Client) NodeInspect(ctx context.Context, nodeID string, options NodeInspectOptions) (NodeInspectResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeInspectResult{}, err + } + resp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NodeInspectResult{}, err + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return NodeInspectResult{}, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return NodeInspectResult{Node: response, Raw: body}, err +} diff --git a/vendor/github.com/moby/moby/client/node_list.go b/vendor/github.com/moby/moby/client/node_list.go new file mode 100644 index 000000000..1a1b57922 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_list.go @@ -0,0 +1,33 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeListOptions holds parameters to list nodes with. 
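// Illustrative usage sketch (not part of the vendored file): inspecting a
// swarm node. The nodeID is caller-supplied; the swarm.Node field layout is
// not shown in this hunk, so only the raw payload size is reported.
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func inspectNode(ctx context.Context, cli *client.Client, nodeID string) error {
	res, err := cli.NodeInspect(ctx, nodeID, client.NodeInspectOptions{})
	if err != nil {
		return err
	}
	// res.Node holds the decoded swarm.Node; res.Raw keeps the unmodified JSON.
	fmt.Printf("raw node payload: %d bytes\n", len(res.Raw))
	return nil
}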
+type NodeListOptions struct { + Filters Filters +} + +type NodeListResult struct { + Items []swarm.Node +} + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options NodeListOptions) (NodeListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NodeListResult{}, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.Body).Decode(&nodes) + return NodeListResult{Items: nodes}, err +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/moby/moby/client/node_remove.go similarity index 56% rename from vendor/github.com/docker/docker/client/node_remove.go rename to vendor/github.com/moby/moby/client/node_remove.go index 07d8e6536..56c39d67a 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/moby/moby/client/node_remove.go @@ -1,17 +1,21 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "net/url" - - "github.com/docker/docker/api/types/swarm" ) +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} +type NodeRemoveResult struct{} + // NodeRemove removes a Node. -func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error { +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options NodeRemoveOptions) (NodeRemoveResult, error) { nodeID, err := trimID("node", nodeID) if err != nil { - return err + return NodeRemoveResult{}, err } query := url.Values{} @@ -21,5 +25,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options swarm. resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) defer ensureReaderClosed(resp) - return err + return NodeRemoveResult{}, err } diff --git a/vendor/github.com/moby/moby/client/node_update.go b/vendor/github.com/moby/moby/client/node_update.go new file mode 100644 index 000000000..4bc7c3b69 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_update.go @@ -0,0 +1,30 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeUpdateOptions holds parameters to update nodes with. +type NodeUpdateOptions struct { + Version swarm.Version + Spec swarm.NodeSpec +} + +type NodeUpdateResult struct{} + +// NodeUpdate updates a Node. 
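// Illustrative usage sketch (not part of the vendored file): NodeRemove now
// takes client.NodeRemoveOptions and returns a NodeRemoveResult instead of a
// bare error. The nodeID is caller-supplied.
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func removeNode(ctx context.Context, cli *client.Client, nodeID string) error {
	nodes, err := cli.NodeList(ctx, client.NodeListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d nodes before removal\n", len(nodes.Items))
	_, err = cli.NodeRemove(ctx, nodeID, client.NodeRemoveOptions{Force: false})
	return err
}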
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, options NodeUpdateOptions) (NodeUpdateResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeUpdateResult{}, err + } + + query := url.Values{} + query.Set("version", options.Version.String()) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + return NodeUpdateResult{}, err +} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/moby/moby/client/options.go similarity index 66% rename from vendor/github.com/docker/docker/client/options.go rename to vendor/github.com/moby/moby/client/options.go index 6f68fc2b8..1b1e92bbb 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/moby/moby/client/options.go @@ -2,6 +2,7 @@ package client import ( "context" + "fmt" "net" "net/http" "os" @@ -11,13 +12,47 @@ import ( "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel/trace" ) +type clientConfig struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // userAgent is the User-Agent header to use for HTTP requests. It takes + // precedence over User-Agent headers set in customHTTPHeaders, and other + // header variables. When set to an empty string, the User-Agent header + // is removed, and no header is sent. + userAgent *string + // custom HTTP headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // traceOpts is a list of options to configure the tracing span. + traceOpts []otelhttp.Option +} + // Opt is a configuration option to initialize a [Client]. -type Opt func(*Client) error +type Opt func(*clientConfig) error // FromEnv configures the client with values from environment variables. It // is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv], @@ -32,7 +67,7 @@ type Opt func(*Client) error // which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem'). // - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification // (off by default). -func FromEnv(c *Client) error { +func FromEnv(c *clientConfig) error { ops := []Opt{ WithTLSClientConfigFromEnv(), WithHostFromEnv(), @@ -50,18 +85,18 @@ func FromEnv(c *Client) error { // used to set the Timeout and KeepAlive settings of the client. It returns // an error if the client does not have a [http.Transport] configured. 
func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { if transport, ok := c.client.Transport.(*http.Transport); ok { transport.DialContext = dialContext return nil } - return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + return fmt.Errorf("cannot apply dialer to transport: %T", c.client.Transport) } } // WithHost overrides the client host with the specified one. func WithHost(host string) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { hostURL, err := ParseHostURL(host) if err != nil { return err @@ -73,15 +108,30 @@ func WithHost(host string) Opt { if transport, ok := c.client.Transport.(*http.Transport); ok { return sockets.ConfigureTransport(transport, c.proto, c.addr) } - return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + // For test transports, we skip transport configuration but still + // set the host fields so that the client can use them for headers + if _, ok := c.client.Transport.(testRoundTripper); ok { + return nil + } + return fmt.Errorf("cannot apply host to transport: %T", c.client.Transport) } } +// testRoundTripper allows us to inject a mock-transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type testRoundTripper func(*http.Request) (*http.Response, error) + +func (tf testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +func (testRoundTripper) skipConfigureTransport() bool { return true } + // WithHostFromEnv overrides the client host with the host specified in the // DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, // or set to an empty value, the host is not modified. func WithHostFromEnv() Opt { - return func(c *Client) error { + return func(c *clientConfig) error { if host := os.Getenv(EnvOverrideHost); host != "" { return WithHost(host)(c) } @@ -91,7 +141,7 @@ func WithHostFromEnv() Opt { // WithHTTPClient overrides the client's HTTP client with the specified one. func WithHTTPClient(client *http.Client) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { if client != nil { c.client = client } @@ -101,7 +151,7 @@ func WithHTTPClient(client *http.Client) Opt { // WithTimeout configures the time limit for requests made by the HTTP client. func WithTimeout(timeout time.Duration) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { c.client.Timeout = timeout return nil } @@ -111,7 +161,7 @@ func WithTimeout(timeout time.Duration) Opt { // It overrides any User-Agent set in headers. When set to an empty string, // the User-Agent header is removed, and no header is sent. func WithUserAgent(ua string) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { c.userAgent = &ua return nil } @@ -121,7 +171,7 @@ func WithUserAgent(ua string) Opt { // It does not allow for built-in headers (such as "User-Agent", if set) to // be overridden. Also see [WithUserAgent]. func WithHTTPHeaders(headers map[string]string) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { c.customHTTPHeaders = headers return nil } @@ -129,7 +179,7 @@ func WithHTTPHeaders(headers map[string]string) Opt { // WithScheme overrides the client scheme with the specified one. 
func WithScheme(scheme string) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { c.scheme = scheme return nil } @@ -137,10 +187,10 @@ func WithScheme(scheme string) Opt { // WithTLSClientConfig applies a TLS config to the client transport. func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { transport, ok := c.client.Transport.(*http.Transport) if !ok { - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + return fmt.Errorf("cannot apply tls config to transport: %T", c.client.Transport) } config, err := tlsconfig.Client(tlsconfig.Options{ CAFile: cacertPath, @@ -149,7 +199,7 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { ExclusiveRootPools: true, }) if err != nil { - return errors.Wrap(err, "failed to create tls config") + return fmt.Errorf("failed to create tls config: %w", err) } transport.TLSClientConfig = config return nil @@ -168,7 +218,7 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { // - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification // (off by default). func WithTLSClientConfigFromEnv() Opt { - return func(c *Client) error { + return func(c *clientConfig) error { dockerCertPath := os.Getenv(EnvOverrideCertPath) if dockerCertPath == "" { return nil @@ -194,8 +244,12 @@ func WithTLSClientConfigFromEnv() Opt { // WithVersion overrides the client version with the specified one. If an empty // version is provided, the value is ignored to allow version negotiation // (see [WithAPIVersionNegotiation]). +// +// WithVersion does not validate if the client supports the given version, +// and callers should verify if the version is in the correct format and +// lower than the maximum supported version as defined by [MaxAPIVersion]. func WithVersion(version string) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { if v := strings.TrimPrefix(version, "v"); v != "" { c.version = v c.manualOverride = true @@ -208,8 +262,12 @@ func WithVersion(version string) Opt { // the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. // If DOCKER_API_VERSION is not set, or set to an empty value, the version // is not modified. +// +// WithVersion does not validate if the client supports the given version, +// and callers should verify if the version is in the correct format and +// lower than the maximum supported version as defined by [MaxAPIVersion]. func WithVersionFromEnv() Opt { - return func(c *Client) error { + return func(c *clientConfig) error { return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) } } @@ -219,21 +277,21 @@ func WithVersionFromEnv() Opt { // to use when making requests. API version negotiation is performed on the first // request; subsequent requests do not re-negotiate. func WithAPIVersionNegotiation() Opt { - return func(c *Client) error { + return func(c *clientConfig) error { c.negotiateVersion = true return nil } } // WithTraceProvider sets the trace provider for the client. -// If this is not set then the global trace provider will be used. +// If this is not set then the global trace provider is used. func WithTraceProvider(provider trace.TracerProvider) Opt { return WithTraceOptions(otelhttp.WithTracerProvider(provider)) } // WithTraceOptions sets tracing span options for the client. 
func WithTraceOptions(opts ...otelhttp.Option) Opt { - return func(c *Client) error { + return func(c *clientConfig) error { c.traceOpts = append(c.traceOpts, opts...) return nil } diff --git a/vendor/github.com/moby/moby/client/ping.go b/vendor/github.com/moby/moby/client/ping.go new file mode 100644 index 000000000..15c54faa0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/ping.go @@ -0,0 +1,151 @@ +package client + +import ( + "context" + "net/http" + "path" + "strings" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/swarm" +) + +// PingOptions holds options for [client.Ping]. +type PingOptions struct { + // NegotiateAPIVersion queries the API and updates the version to match the API + // version. NegotiateAPIVersion downgrades the client's API version to match the + // APIVersion if the ping version is lower than the default version. If the API + // version reported by the server is higher than the maximum version supported + // by the client, it uses the client's maximum version. + // + // If a manual override is in place, either through the "DOCKER_API_VERSION" + // ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized + // with a fixed version ([WithVersion]), no negotiation is performed. + // + // If the API server's ping response does not contain an API version, or if the + // client did not get a successful ping response, it assumes it is connected with + // an old daemon that does not support API version negotiation, in which case it + // downgrades to the lowest supported API version. + NegotiateAPIVersion bool + + // ForceNegotiate forces the client to re-negotiate the API version, even if + // API-version negotiation already happened. This option cannot be + // used if the client is configured with a fixed version using (using + // [WithVersion] or [WithVersionFromEnv]). + // + // This option has no effect if NegotiateAPIVersion is not set. + ForceNegotiate bool +} + +// PingResult holds the result of a [Client.Ping] API call. +type PingResult struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion build.BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *SwarmStatus +} + +// SwarmStatus provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type SwarmStatus struct { + // NodeState represents the state of the node. + NodeState swarm.LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. It ignores internal server errors returned by the API, which +// may be returned if the daemon is in an unhealthy state, but returns errors +// for other non-success status codes, failing to connect to the API, or failing +// to parse the API response. 
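// Illustrative usage sketch (not part of the vendored file): composing the
// functional options defined in this file. The slice would be handed to
// whichever constructor the package exposes; that constructor is not part of
// this hunk and is deliberately not called here. The user agent is a placeholder.
package example // hypothetical package, for illustration only

import "github.com/moby/moby/client"

func defaultClientOptions() []client.Opt {
	return []client.Opt{
		client.FromEnv,                      // DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY
		client.WithAPIVersionNegotiation(),  // negotiate the API version on the first request
		client.WithUserAgent("example/1.0"), // placeholder user agent
	}
}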
+func (cli *Client) Ping(ctx context.Context, options PingOptions) (PingResult, error) { + if cli.manualOverride { + return cli.ping(ctx) + } + if !options.NegotiateAPIVersion && !cli.negotiateVersion { + return cli.ping(ctx) + } + + // Ensure exclusive write access to version and negotiated fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + + ping, err := cli.ping(ctx) + if err != nil { + return cli.ping(ctx) + } + + if cli.negotiated.Load() && !options.ForceNegotiate { + return ping, nil + } + + return ping, cli.negotiateAPIVersion(ping.APIVersion) +} + +func (cli *Client) ping(ctx context.Context) (PingResult, error) { + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return PingResult{}, err + } + resp, err := cli.doRequest(req) + defer ensureReaderClosed(resp) + if err == nil && resp.StatusCode == http.StatusOK { + // Fast-path; successfully connected using a HEAD request and + // we got a "OK" (200) status. For non-200 status-codes, we fall + // back to doing a GET request, as a HEAD request won't have a + // response-body to get error details from. + return newPingResult(resp), nil + } + + // HEAD failed or returned a non-OK status; fallback to GET. + req.Method = http.MethodGet + resp, err = cli.doRequest(req) + defer ensureReaderClosed(resp) + if err != nil { + // Failed to connect. + return PingResult{}, err + } + + // GET request succeeded but may have returned a non-200 status. + // Return a Ping response, together with any error returned by + // the API server. + return newPingResult(resp), checkResponseErr(resp) +} + +func newPingResult(resp *http.Response) PingResult { + if resp == nil { + return PingResult{} + } + var swarmStatus *SwarmStatus + if si := resp.Header.Get("Swarm"); si != "" { + state, role, _ := strings.Cut(si, "/") + swarmStatus = &SwarmStatus{ + NodeState: swarm.LocalNodeState(state), + ControlAvailable: role == "manager", + } + } + + return PingResult{ + APIVersion: resp.Header.Get("Api-Version"), + OSType: resp.Header.Get("Ostype"), + Experimental: resp.Header.Get("Docker-Experimental") == "true", + BuilderVersion: build.BuilderVersion(resp.Header.Get("Builder-Version")), + SwarmStatus: swarmStatus, + } +} diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/moby/moby/client/pkg/versions/compare.go similarity index 94% rename from vendor/github.com/docker/docker/api/types/versions/compare.go rename to vendor/github.com/moby/moby/client/pkg/versions/compare.go index 621725a36..1a0325c7e 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/moby/moby/client/pkg/versions/compare.go @@ -1,4 +1,4 @@ -package versions // import "github.com/docker/docker/api/types/versions" +package versions import ( "strconv" diff --git a/vendor/github.com/moby/moby/client/plugin_create.go b/vendor/github.com/moby/moby/client/plugin_create.go new file mode 100644 index 000000000..c1a2dd5a6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_create.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "io" + "net/http" + "net/url" +) + +// PluginCreateOptions hold all options to plugin create. 
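// Illustrative usage sketch (not part of the vendored file): pinging the
// daemon while negotiating the API version, then reading the headers surfaced
// in PingResult, including the optional SwarmStatus.
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

func pingDaemon(ctx context.Context, cli *client.Client) error {
	res, err := cli.Ping(ctx, client.PingOptions{NegotiateAPIVersion: true})
	if err != nil {
		return err
	}
	fmt.Printf("API %s on %s (experimental: %v)\n", res.APIVersion, res.OSType, res.Experimental)
	if res.SwarmStatus != nil {
		fmt.Printf("swarm: state=%s manager=%v\n", res.SwarmStatus.NodeState, res.SwarmStatus.ControlAvailable)
	}
	return nil
}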
+type PluginCreateOptions struct { + RepoName string +} + +// PluginCreateResult represents the result of a plugin create operation. +type PluginCreateResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions PluginCreateOptions) (PluginCreateResult, error) { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + defer ensureReaderClosed(resp) + return PluginCreateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_disable.go b/vendor/github.com/moby/moby/client/plugin_disable.go new file mode 100644 index 000000000..65ab0aa00 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_disable.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "net/url" +) + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginDisableResult represents the result of a plugin disable operation. +type PluginDisableResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options PluginDisableOptions) (PluginDisableResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginDisableResult{}, err + } + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + defer ensureReaderClosed(resp) + return PluginDisableResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_enable.go b/vendor/github.com/moby/moby/client/plugin_enable.go new file mode 100644 index 000000000..7c3e26b67 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_enable.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginEnableResult represents the result of a plugin enable operation. +type PluginEnableResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options PluginEnableOptions) (PluginEnableResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginEnableResult{}, err + } + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + defer ensureReaderClosed(resp) + return PluginEnableResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_inspect.go b/vendor/github.com/moby/moby/client/plugin_inspect.go new file mode 100644 index 000000000..8caf06a8e --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/plugin" +) + +// PluginInspectOptions holds parameters to inspect a plugin. +type PluginInspectOptions struct { + // Add future optional parameters here +} + +// PluginInspectResult holds the result from the [Client.PluginInspect] method. 
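// Illustrative usage sketch (not part of the vendored file): force-disabling a
// plugin and re-enabling it with a 30-second timeout. The plugin name is
// caller-supplied; both option values are placeholders.
package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/moby/moby/client"
)

func restartPlugin(ctx context.Context, cli *client.Client, name string) error {
	if _, err := cli.PluginDisable(ctx, name, client.PluginDisableOptions{Force: true}); err != nil {
		return err
	}
	_, err := cli.PluginEnable(ctx, name, client.PluginEnableOptions{Timeout: 30})
	return err
}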
+type PluginInspectResult struct { + Plugin plugin.Plugin + Raw json.RawMessage +} + +// PluginInspect inspects an existing plugin +func (cli *Client) PluginInspect(ctx context.Context, name string, options PluginInspectOptions) (PluginInspectResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginInspectResult{}, err + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + return PluginInspectResult{}, err + } + + var out PluginInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Plugin) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_install.go b/vendor/github.com/moby/moby/client/plugin_install.go new file mode 100644 index 000000000..a589b2e1f --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_install.go @@ -0,0 +1,175 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + + cerrdefs "github.com/containerd/errdefs" + "github.com/distribution/reference" + "github.com/moby/moby/api/types/plugin" + "github.com/moby/moby/api/types/registry" +) + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + AcceptPermissionsFunc func(context.Context, plugin.Privileges) (bool, error) + Args []string +} + +// PluginInstallResult holds the result of a plugin install operation. +// It is an io.ReadCloser from which the caller can read installation progress or result. 
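+//
+// A minimal sketch of consuming the stream (illustrative only): it assumes a
+// ready-to-use *Client named cli and a context.Context named ctx; the plugin
+// name and remote reference are placeholders. Draining the reader lets the
+// install (and optional enable) finish before the stream is closed.
+//
+//	res, err := cli.PluginInstall(ctx, "my-plugin", PluginInstallOptions{
+//		RemoteRef:            "example.com/acme/my-plugin:latest",
+//		AcceptAllPermissions: true,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Close()
+//	if _, err := io.Copy(io.Discard, res); err != nil {
+//		return err
+//	}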
+type PluginInstallResult struct { + io.ReadCloser +} + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options PluginInstallOptions) (_ PluginInstallResult, retErr error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return PluginInstallResult{}, fmt.Errorf("invalid remote reference: %w", err) + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, &options) + if err != nil { + return PluginInstallResult{}, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return PluginInstallResult{}, err + } + + name = resp.Header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.Body) + if err != nil { + _ = pw.CloseWithError(err) + return + } + defer func() { + if retErr != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if _, err := cli.PluginSet(ctx, name, PluginSetOptions{Args: options.Args}); err != nil { + _ = pw.CloseWithError(err) + return + } + } + + if options.Disabled { + _ = pw.Close() + return + } + + _, enableErr := cli.PluginEnable(ctx, name, PluginEnableOptions{Timeout: 0}) + _ = pw.CloseWithError(enableErr) + }() + return PluginInstallResult{pr}, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { + return cli.get(ctx, "/plugins/privileges", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges plugin.Privileges, registryAuth string) (*http.Response, error) { + return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options pluginOptions) (plugin.Privileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.getRegistryAuth()) + if cerrdefs.IsUnauthorized(err) && options.getPrivilegeFunc() != nil { + // TODO: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.getPrivilegeFunc()(ctx) + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.setRegistryAuth(newAuthHeader) + resp, err = cli.tryPluginPrivileges(ctx, query, options.getRegistryAuth()) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges plugin.Privileges + if err := json.NewDecoder(resp.Body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.getAcceptAllPermissions() && options.getAcceptPermissionsFunc() != nil && len(privileges) > 0 { + accept, err := options.getAcceptPermissionsFunc()(ctx, privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, errors.New("permission denied while installing plugin " + options.getRemoteRef()) + } + } + return privileges, nil +} + +type pluginOptions interface { + getRegistryAuth() string + setRegistryAuth(string) + getPrivilegeFunc() func(context.Context) (string, error) + getAcceptAllPermissions() 
bool + getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) + getRemoteRef() string +} + +func (o *PluginInstallOptions) getRegistryAuth() string { + return o.RegistryAuth +} + +func (o *PluginInstallOptions) setRegistryAuth(auth string) { + o.RegistryAuth = auth +} + +func (o *PluginInstallOptions) getPrivilegeFunc() func(context.Context) (string, error) { + return o.PrivilegeFunc +} + +func (o *PluginInstallOptions) getAcceptAllPermissions() bool { + return o.AcceptAllPermissions +} + +func (o *PluginInstallOptions) getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) { + return o.AcceptPermissionsFunc +} + +func (o *PluginInstallOptions) getRemoteRef() string { + return o.RemoteRef +} diff --git a/vendor/github.com/moby/moby/client/plugin_list.go b/vendor/github.com/moby/moby/client/plugin_list.go new file mode 100644 index 000000000..cbd90b407 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/plugin" +) + +// PluginListOptions holds parameters to list plugins. +type PluginListOptions struct { + Filters Filters +} + +// PluginListResult represents the result of a plugin list operation. +type PluginListResult struct { + Items []plugin.Plugin +} + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, options PluginListOptions) (PluginListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return PluginListResult{}, err + } + + var plugins plugin.ListResponse + err = json.NewDecoder(resp.Body).Decode(&plugins) + return PluginListResult{Items: plugins}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_push.go b/vendor/github.com/moby/moby/client/plugin_push.go new file mode 100644 index 000000000..4ba25d133 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_push.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "io" + "net/http" + + "github.com/moby/moby/api/types/registry" +) + +// PluginPushOptions holds parameters to push a plugin. +type PluginPushOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// PluginPushResult is the result of a plugin push operation +type PluginPushResult struct { + io.ReadCloser +} + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, options PluginPushOptions) (PluginPushResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginPushResult{}, err + } + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ + registry.AuthHeader: {options.RegistryAuth}, + }) + if err != nil { + return PluginPushResult{}, err + } + return PluginPushResult{resp.Body}, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_remove.go b/vendor/github.com/moby/moby/client/plugin_remove.go new file mode 100644 index 000000000..229f40858 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_remove.go @@ -0,0 +1,33 @@ +package client + +import ( + "context" + "net/url" +) + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginRemoveResult represents the result of a plugin removal. 
+type PluginRemoveResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options PluginRemoveOptions) (PluginRemoveResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginRemoveResult{}, err + } + + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return PluginRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_set.go b/vendor/github.com/moby/moby/client/plugin_set.go new file mode 100644 index 000000000..c1f6bb5fa --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_set.go @@ -0,0 +1,27 @@ +package client + +import ( + "context" +) + +// PluginSetOptions defines options for modifying a plugin's settings. +type PluginSetOptions struct { + Args []string +} + +// PluginSetResult represents the result of a plugin set operation. +type PluginSetResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, options PluginSetOptions) (PluginSetResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginSetResult{}, err + } + + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, options.Args, nil) + defer ensureReaderClosed(resp) + return PluginSetResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_upgrade.go b/vendor/github.com/moby/moby/client/plugin_upgrade.go new file mode 100644 index 000000000..f9df6e584 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_upgrade.go @@ -0,0 +1,89 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/distribution/reference" + "github.com/moby/moby/api/types/plugin" + "github.com/moby/moby/api/types/registry" +) + +// PluginUpgradeOptions holds parameters to upgrade a plugin. +type PluginUpgradeOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + AcceptPermissionsFunc func(context.Context, plugin.Privileges) (bool, error) + Args []string +} + +// PluginUpgradeResult holds the result of a plugin upgrade operation. 
+type PluginUpgradeResult io.ReadCloser + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options PluginUpgradeOptions) (PluginUpgradeResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return nil, err + } + + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, fmt.Errorf("invalid remote reference: %w", err) + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, &options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.Body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges plugin.Privileges, name, registryAuth string) (*http.Response, error) { + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (o *PluginUpgradeOptions) getRegistryAuth() string { + return o.RegistryAuth +} + +func (o *PluginUpgradeOptions) setRegistryAuth(auth string) { + o.RegistryAuth = auth +} + +func (o *PluginUpgradeOptions) getPrivilegeFunc() func(context.Context) (string, error) { + return o.PrivilegeFunc +} + +func (o *PluginUpgradeOptions) getAcceptAllPermissions() bool { + return o.AcceptAllPermissions +} + +func (o *PluginUpgradeOptions) getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) { + return o.AcceptPermissionsFunc +} + +func (o *PluginUpgradeOptions) getRemoteRef() string { + return o.RemoteRef +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/moby/moby/client/request.go similarity index 52% rename from vendor/github.com/docker/docker/client/request.go rename to vendor/github.com/moby/moby/client/request.go index d9074a736..f5c2b956a 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/moby/moby/client/request.go @@ -1,9 +1,10 @@ -package client // import "github.com/docker/docker/client" +package client import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "net" @@ -13,9 +14,7 @@ import ( "reflect" "strings" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" + "github.com/moby/moby/api/types/common" ) // head sends an http request to the docker API using the method HEAD. @@ -28,25 +27,25 @@ func (cli *Client) get(ctx context.Context, path string, query url.Values, heade return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) } -// post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) { - body, headers, err := encodeBody(obj, headers) +// post sends an http POST request to the API. 
+func (cli *Client) post(ctx context.Context, path string, query url.Values, body any, headers http.Header) (*http.Response, error) { + jsonBody, headers, err := prepareJSONRequest(body, headers) if err != nil { return nil, err } - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) + return cli.sendRequest(ctx, http.MethodPost, path, query, jsonBody, headers) } func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (*http.Response, error) { - body, headers, err := encodeBody(obj, headers) +func (cli *Client) put(ctx context.Context, path string, query url.Values, body any, headers http.Header) (*http.Response, error) { + jsonBody, headers, err := prepareJSONRequest(body, headers) if err != nil { return nil, err } - return cli.putRaw(ctx, path, query, body, headers) + return cli.putRaw(ctx, path, query, jsonBody, headers) } // putRaw sends an http request to the docker API using the method PUT. @@ -65,26 +64,36 @@ func (cli *Client) delete(ctx context.Context, path string, query url.Values, he return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) } -func encodeBody(obj interface{}, headers http.Header) (io.Reader, http.Header, error) { - if obj == nil { +// prepareJSONRequest encodes the given body to JSON and returns it as an [io.Reader], and sets the Content-Type +// header. If body is nil, or a nil-interface, a "nil" body is returned without +// error. +// +// TODO(thaJeztah): should this return an error if a different Content-Type is already set? +// TODO(thaJeztah): is "nil" the appropriate approach for an empty body, or should we use [http.NoBody] (or similar)? +func prepareJSONRequest(body any, headers http.Header) (io.Reader, http.Header, error) { + if body == nil { return nil, headers, nil } // encoding/json encodes a nil pointer as the JSON document `null`, // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler. // That is almost certainly not what the caller intended as the request body. - if reflect.TypeOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() { + // + // TODO(thaJeztah): consider moving this to jsonEncode, which would also allow returning an (empty) reader instead of nil. + if reflect.TypeOf(body).Kind() == reflect.Ptr && reflect.ValueOf(body).IsNil() { return nil, headers, nil } - body, err := encodeData(obj) + jsonBody, err := jsonEncode(body) if err != nil { return nil, headers, err } - if headers == nil { - headers = make(map[string][]string) + hdr := http.Header{} + if headers != nil { + hdr = headers.Clone() } - headers["Content-Type"] = []string{"application/json"} - return body, headers, nil + + hdr.Set("Content-Type", "application/json") + return jsonBody, hdr, nil } func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { @@ -114,81 +123,111 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u } resp, err := cli.doRequest(req) - switch { - case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): - return nil, err - case err == nil: - return resp, cli.checkResponseErr(resp) - default: + if err != nil { + // Failed to connect or context error. 
return resp, err } + + // Successfully made a request; return the response and handle any + // API HTTP response errors. + return resp, checkResponseErr(resp) } +// doRequest sends an HTTP request and returns an HTTP response. It is a +// wrapper around [http.Client.Do] with extra handling to decorate errors. +// +// Otherwise, it behaves identical to [http.Client.Do]; an error is returned +// when failing to make a connection, On error, any Response can be ignored. +// A non-2xx status code doesn't cause an error. func (cli *Client) doRequest(req *http.Request) (*http.Response, error) { resp, err := cli.client.Do(req) - if err != nil { - if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return nil, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} - } + if err == nil { + return resp, nil + } - if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return nil, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")} - } + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return nil, errConnectionFailed{fmt.Errorf("%w.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} + } - // Don't decorate context sentinel errors; users may be comparing to - // them directly. - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return nil, err - } + const ( + // Go 1.25 / TLS 1.3 may produce a generic "handshake failure" + // whereas TLS 1.2 may produce a "bad certificate" TLS alert. + // See https://github.com/golang/go/issues/56371 + // + // > https://tip.golang.org/doc/go1.12#tls_1_3 + // > + // > In TLS 1.3 the client is the last one to speak in the handshake, so if + // > it causes an error to occur on the server, it will be returned on the + // > client by the first Read, not by Handshake. For example, that will be + // > the case if the server rejects the client certificate. + // + // https://github.com/golang/go/blob/go1.25.1/src/crypto/tls/alert.go#L71-L72 + alertBadCertificate = "bad certificate" // go1.24 / TLS 1.2 + alertHandshakeFailure = "handshake failure" // go1.25 / TLS 1.3 + ) + + // TODO(thaJeztah): see if we can use errors.As for a [crypto/tls.AlertError] instead of bare string matching. + if cli.scheme == "https" && (strings.Contains(err.Error(), alertHandshakeFailure) || strings.Contains(err.Error(), alertBadCertificate)) { + return nil, errConnectionFailed{fmt.Errorf("the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings: %w", err)} + } - var uErr *url.Error - if errors.As(err, &uErr) { - var nErr *net.OpError - if errors.As(uErr.Err, &nErr) { - if os.IsPermission(nErr.Err) { - return nil, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)} - } - } - } + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err + } - var nErr net.Error - if errors.As(err, &nErr) { - // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)? 
- if nErr.Timeout() { - return nil, connectionFailed(cli.host) - } - if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { - return nil, connectionFailed(cli.host) - } - } + if errors.Is(err, os.ErrPermission) { + // Don't include request errors ("Get "http://%2Fvar%2Frun%2Fdocker.sock/v1.51/version"), + // which are irrelevant if we weren't able to connect. + return nil, errConnectionFailed{fmt.Errorf("permission denied while trying to connect to the docker API at %v", cli.host)} + } + if errors.Is(err, os.ErrNotExist) { + // Unwrap the error to remove request errors ("Get "http://%2Fvar%2Frun%2Fdocker.sock/v1.51/version"), + // which are irrelevant if we weren't able to connect. + err = errors.Unwrap(err) + return nil, errConnectionFailed{fmt.Errorf("failed to connect to the docker API at %v; check if the path is correct and if the daemon is running: %w", cli.host, err)} + } + var dnsErr *net.DNSError + if errors.As(err, &dnsErr) { + return nil, errConnectionFailed{fmt.Errorf("failed to connect to the docker API at %v: %w", cli.host, dnsErr)} + } - // Although there's not a strongly typed error for this in go-winio, - // lots of people are using the default configuration for the docker - // daemon on Windows where the daemon is listening on a named pipe - // `//./pipe/docker_engine, and the client must be running elevated. - // Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: - // open //./pipe/docker_engine: The system cannot find the file specified.`. - // Note we can't string compare "The system cannot find the file specified" as - // this is localised - for example in French the error would be - // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` - if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - // Checks if client is running with elevated privileges - if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil { - err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") - } else { - _ = f.Close() - err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") - } + var nErr net.Error + if errors.As(err, &nErr) { + // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)? + if nErr.Timeout() { + return nil, connectionFailed(cli.host) + } + if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { + return nil, connectionFailed(cli.host) } + } - return nil, errConnectionFailed{errors.Wrap(err, "error during connect")} + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. 
+ // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + // Checks if client is running with elevated privileges + if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil { + err = fmt.Errorf("in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect: %w", err) + } else { + _ = f.Close() + err = fmt.Errorf("this error may indicate that the docker daemon is not running: %w", err) + } } - return resp, nil + return nil, errConnectionFailed{fmt.Errorf("error during connect: %w", err)} } -func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { +func checkResponseErr(serverResp *http.Response) (retErr error) { if serverResp == nil { return nil } @@ -235,20 +274,20 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { var daemonErr error if serverResp.Header.Get("Content-Type") == "application/json" { - var errorResponse types.ErrorResponse + var errorResponse common.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { - return errors.Wrap(err, "Error reading JSON") + return fmt.Errorf("error reading JSON: %w", err) } if errorResponse.Message == "" { // Error-message is empty, which means that we successfully parsed the // JSON-response (no error produced), but it didn't contain an error // message. This could either be because the response was empty, or // the response was valid JSON, but not with the expected schema - // ([types.ErrorResponse]). + // ([common.ErrorResponse]). // // We cannot use "strict" JSON handling (json.NewDecoder with DisallowUnknownFields) // due to the API using an open schema (we must anticipate fields - // being added to [types.ErrorResponse] in the future, and not + // being added to [common.ErrorResponse] in the future, and not // reject those responses. // // For these cases, we construct an error with the status-code @@ -264,22 +303,18 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) } } else { - // Fall back to returning the response as-is for API versions < 1.24 - // that didn't support JSON error responses, and for situations - // where a plain text error is returned. This branch may also catch + // Fall back to returning the response as-is for situations where a + // plain text error is returned. This branch may also catch // situations where a proxy is involved, returning a HTML response. 
daemonErr = errors.New(strings.TrimSpace(string(body))) } - return errors.Wrap(daemonErr, "Error response from daemon") + return fmt.Errorf("Error response from daemon: %w", daemonErr) } func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request { // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { - continue - } req.Header.Set(k, v) } @@ -297,14 +332,14 @@ func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Requ return req } -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) +func jsonEncode(data any) (io.Reader, error) { + var params bytes.Buffer if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { + if err := json.NewEncoder(¶ms).Encode(data); err != nil { return nil, err } } - return params, nil + return ¶ms, nil } func ensureReaderClosed(response *http.Response) { diff --git a/vendor/github.com/moby/moby/client/secret_create.go b/vendor/github.com/moby/moby/client/secret_create.go new file mode 100644 index 000000000..8e59a42ce --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretCreateOptions holds options for creating a secret. +type SecretCreateOptions struct { + Spec swarm.SecretSpec +} + +// SecretCreateResult holds the result from the [Client.SecretCreate] method. +type SecretCreateResult struct { + ID string +} + +// SecretCreate creates a new secret. +func (cli *Client) SecretCreate(ctx context.Context, options SecretCreateOptions) (SecretCreateResult, error) { + resp, err := cli.post(ctx, "/secrets/create", nil, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretCreateResult{}, err + } + + var out swarm.ConfigCreateResponse + err = json.NewDecoder(resp.Body).Decode(&out) + if err != nil { + return SecretCreateResult{}, err + } + return SecretCreateResult{ID: out.ID}, nil +} diff --git a/vendor/github.com/moby/moby/client/secret_inspect.go b/vendor/github.com/moby/moby/client/secret_inspect.go new file mode 100644 index 000000000..fefd4cd23 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretInspectOptions holds options for inspecting a secret. +type SecretInspectOptions struct { + // Add future optional parameters here +} + +// SecretInspectResult holds the result from the [Client.SecretInspect]. method. +type SecretInspectResult struct { + Secret swarm.Secret + Raw json.RawMessage +} + +// SecretInspect returns the secret information with raw data. 
+func (cli *Client) SecretInspect(ctx context.Context, id string, options SecretInspectOptions) (SecretInspectResult, error) { + id, err := trimID("secret", id) + if err != nil { + return SecretInspectResult{}, err + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + return SecretInspectResult{}, err + } + + var out SecretInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Secret) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/secret_list.go b/vendor/github.com/moby/moby/client/secret_list.go new file mode 100644 index 000000000..be3695575 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters Filters +} + +// SecretListResult holds the result from the [client.SecretList] method. +type SecretListResult struct { + Items []swarm.Secret +} + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options SecretListOptions) (SecretListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretListResult{}, err + } + + var out SecretListResult + err = json.NewDecoder(resp.Body).Decode(&out.Items) + if err != nil { + return SecretListResult{}, err + } + return out, nil +} diff --git a/vendor/github.com/moby/moby/client/secret_remove.go b/vendor/github.com/moby/moby/client/secret_remove.go new file mode 100644 index 000000000..8554f3f21 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_remove.go @@ -0,0 +1,25 @@ +package client + +import "context" + +type SecretRemoveOptions struct { + // Add future optional parameters here +} + +type SecretRemoveResult struct { + // Add future fields here +} + +// SecretRemove removes a secret. +func (cli *Client) SecretRemove(ctx context.Context, id string, options SecretRemoveOptions) (SecretRemoveResult, error) { + id, err := trimID("secret", id) + if err != nil { + return SecretRemoveResult{}, err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretRemoveResult{}, err + } + return SecretRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/secret_update.go b/vendor/github.com/moby/moby/client/secret_update.go new file mode 100644 index 000000000..c88ad1106 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_update.go @@ -0,0 +1,32 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretUpdateOptions holds options for updating a secret. +type SecretUpdateOptions struct { + Version swarm.Version + Spec swarm.SecretSpec +} + +type SecretUpdateResult struct{} + +// SecretUpdate attempts to update a secret. 
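+//
+// A rough read-modify-write sketch (illustrative only): it assumes a ready-to-use
+// *Client named cli, a context.Context named ctx, an existing secret ID in secretID,
+// and that swarm.Secret / swarm.SecretSpec keep the embedded Meta and Annotations
+// layout of the classic API types (Meta.Version, Labels).
+//
+//	ins, err := cli.SecretInspect(ctx, secretID, SecretInspectOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	spec := ins.Secret.Spec
+//	spec.Labels = map[string]string{"rotated": "true"}
+//	_, err = cli.SecretUpdate(ctx, secretID, SecretUpdateOptions{
+//		Version: ins.Secret.Meta.Version,
+//		Spec:    spec,
+//	})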
+func (cli *Client) SecretUpdate(ctx context.Context, id string, options SecretUpdateOptions) (SecretUpdateResult, error) { + id, err := trimID("secret", id) + if err != nil { + return SecretUpdateResult{}, err + } + query := url.Values{} + query.Set("version", options.Version.String()) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretUpdateResult{}, err + } + return SecretUpdateResult{}, nil +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/moby/moby/client/service_create.go similarity index 63% rename from vendor/github.com/docker/docker/client/service_create.go rename to vendor/github.com/moby/moby/client/service_create.go index 6b9932ae2..204d21816 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/moby/moby/client/service_create.go @@ -1,88 +1,95 @@ -package client // import "github.com/docker/docker/client" +package client import ( "context" "encoding/json" + "errors" "fmt" "net/http" "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/api/types/swarm" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) -// ServiceCreate creates a new service. -func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { - var response swarm.ServiceCreateResponse +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + Spec swarm.ServiceSpec - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return response, err - } + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResult represents the result of creating a service. +type ServiceCreateResult struct { + // ID is the ID of the created service. + ID string + // Warnings is a list of warnings that occurred during service creation. + Warnings []string +} + +// ServiceCreate creates a new service. 
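+//
+// A minimal sketch (illustrative only): it assumes a ready-to-use *Client named cli,
+// a context.Context named ctx, and that swarm.ServiceSpec keeps the embedded
+// Annotations field of the classic API types; the service name and image are
+// placeholders.
+//
+//	spec := swarm.ServiceSpec{
+//		Annotations: swarm.Annotations{Name: "web"},
+//		TaskTemplate: swarm.TaskSpec{
+//			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
+//		},
+//	}
+//	created, err := cli.ServiceCreate(ctx, ServiceCreateOptions{Spec: spec})
+//	if err != nil {
+//		return err
+//	}
+//	// created.ID identifies the new service; created.Warnings carries any
+//	// registry-resolution warnings when QueryRegistry is set.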
+func (cli *Client) ServiceCreate(ctx context.Context, options ServiceCreateOptions) (ServiceCreateResult, error) { // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container - if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { - service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + if options.Spec.TaskTemplate.ContainerSpec == nil && (options.Spec.TaskTemplate.Runtime == "" || options.Spec.TaskTemplate.Runtime == swarm.RuntimeContainer) { + options.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} } - if err := validateServiceSpec(service); err != nil { - return response, err - } - if versions.LessThan(cli.version, "1.30") { - if err := validateAPIVersion(service, cli.version); err != nil { - return response, err - } + if err := validateServiceSpec(options.Spec); err != nil { + return ServiceCreateResult{}, err } // ensure that the image is tagged - var resolveWarning string + var warnings []string switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg + case options.Spec.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + options.Spec.TaskTemplate.ContainerSpec.Image = taggedImg } if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + resolveWarning := resolveContainerSpecImage(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth) + warnings = append(warnings, resolveWarning) } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg + case options.Spec.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + options.Spec.TaskTemplate.PluginSpec.Remote = taggedImg } if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + resolveWarning := resolvePluginSpecRemote(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth) + warnings = append(warnings, resolveWarning) } } headers := http.Header{} - if versions.LessThan(cli.version, "1.30") { - // the custom "version" header was used by engine API before 20.10 - // (API 1.30) to switch between client- and server-side lookup of - // image digests. - headers["version"] = []string{cli.version} - } if options.EncodedRegistryAuth != "" { headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} } - resp, err := cli.post(ctx, "/services/create", nil, service, headers) + resp, err := cli.post(ctx, "/services/create", nil, options.Spec, headers) defer ensureReaderClosed(resp) if err != nil { - return response, err + return ServiceCreateResult{}, err } + var response swarm.ServiceCreateResponse err = json.NewDecoder(resp.Body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } + warnings = append(warnings, response.Warnings...) 
- return response, err + return ServiceCreateResult{ + ID: response.ID, + Warnings: warnings, + }, err } func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { @@ -118,7 +125,9 @@ func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, tas } func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { - distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + distributionInspect, err := cli.DistributionInspect(ctx, image, DistributionInspectOptions{ + EncodedRegistryAuth: encodedAuth, + }) var platforms []swarm.Platform if err != nil { return "", nil, err @@ -154,7 +163,7 @@ func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, ima func imageWithDigestString(image string, dgst digest.Digest) string { namedRef, err := reference.ParseNormalizedNamed(image) if err == nil { - if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + if _, hasDigest := namedRef.(reference.Digested); !hasDigest { // ensure that image gets a default tag if none is provided img, err := reference.WithDigest(namedRef, dgst) if err == nil { @@ -195,18 +204,3 @@ func validateServiceSpec(s swarm.ServiceSpec) error { } return nil } - -func validateAPIVersion(c swarm.ServiceSpec, apiVersion string) error { - for _, m := range c.TaskTemplate.ContainerSpec.Mounts { - if m.BindOptions != nil { - if m.BindOptions.NonRecursive && versions.LessThan(apiVersion, "1.40") { - return errors.Errorf("bind-recursive=disabled requires API v1.40 or later") - } - // ReadOnlyNonRecursive can be safely ignored when API < 1.44 - if m.BindOptions.ReadOnlyForceRecursive && versions.LessThan(apiVersion, "1.44") { - return errors.Errorf("bind-recursive=readonly requires API v1.44 or later") - } - } - } - return nil -} diff --git a/vendor/github.com/moby/moby/client/service_inspect.go b/vendor/github.com/moby/moby/client/service_inspect.go new file mode 100644 index 000000000..9bda43f86 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_inspect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ServiceInspectOptions holds parameters related to the service inspect operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// ServiceInspectResult represents the result of a service inspect operation. +type ServiceInspectResult struct { + Service swarm.Service + Raw json.RawMessage +} + +// ServiceInspect retrieves detailed information about a specific service by its ID. 
+func (cli *Client) ServiceInspect(ctx context.Context, serviceID string, options ServiceInspectOptions) (ServiceInspectResult, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return ServiceInspectResult{}, err + } + + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", options.InsertDefaults)) + resp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + if err != nil { + return ServiceInspectResult{}, err + } + + var out ServiceInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Service) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/service_list.go b/vendor/github.com/moby/moby/client/service_list.go new file mode 100644 index 000000000..94b5204be --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_list.go @@ -0,0 +1,44 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters Filters + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceListResult represents the result of a service list operation. +type ServiceListResult struct { + Items []swarm.Service +} + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options ServiceListOptions) (ServiceListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + + if options.Status { + query.Set("status", "true") + } + + resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ServiceListResult{}, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.Body).Decode(&services) + return ServiceListResult{Items: services}, err +} diff --git a/vendor/github.com/moby/moby/client/service_logs.go b/vendor/github.com/moby/moby/client/service_logs.go new file mode 100644 index 000000000..fd565db5a --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_logs.go @@ -0,0 +1,91 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/url" + "time" + + "github.com/moby/moby/client/internal/timestamp" +) + +// ServiceLogsOptions holds parameters to filter logs with. +type ServiceLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ServiceLogsResult holds the result of a service logs operation. +// It implements [io.ReadCloser]. +// It's up to the caller to close the stream. +type ServiceLogsResult interface { + io.ReadCloser +} + +// ServiceLogs returns the logs generated by a service in a [ServiceLogsResult]. +// as an [io.ReadCloser]. Callers should close the stream. 
+// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ServiceLogsOptions) (ServiceLogsResult, error) { + // TODO(thaJeztah): this function needs documentation about the format of ths stream (similar to for container logs) + // TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348 + + serviceID, err := trimID("service", serviceID) + if err != nil { + return nil, err + } + + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "since": %w`, err) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return &serviceLogsResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type serviceLogsResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*serviceLogsResult)(nil) + _ ServiceLogsResult = (*serviceLogsResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/service_remove.go b/vendor/github.com/moby/moby/client/service_remove.go new file mode 100644 index 000000000..163689b69 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_remove.go @@ -0,0 +1,25 @@ +package client + +import "context" + +// ServiceRemoveOptions contains options for removing a service. +type ServiceRemoveOptions struct { + // No options currently; placeholder for future use +} + +// ServiceRemoveResult contains the result of removing a service. +type ServiceRemoveResult struct { + // No fields currently; placeholder for future use +} + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string, options ServiceRemoveOptions) (ServiceRemoveResult, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return ServiceRemoveResult{}, err + } + + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + defer ensureReaderClosed(resp) + return ServiceRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/service_update.go b/vendor/github.com/moby/moby/client/service_update.go new file mode 100644 index 000000000..9e6b52781 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_update.go @@ -0,0 +1,112 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/api/types/swarm" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + Version swarm.Version + Spec swarm.ServiceSpec + + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. 
While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom swarm.RegistryAuthSource + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceUpdateResult represents the result of a service update. +type ServiceUpdateResult struct { + // Warnings contains any warnings that occurred during the update. + Warnings []string +} + +// ServiceUpdate updates a Service. The version number is required to avoid +// conflicting writes. It must be the value as set *before* the update. +// You can find this value in the [swarm.Service.Meta] field, which can +// be found using [Client.ServiceInspectWithRaw]. +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, options ServiceUpdateOptions) (ServiceUpdateResult, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return ServiceUpdateResult{}, err + } + + if err := validateServiceSpec(options.Spec); err != nil { + return ServiceUpdateResult{}, err + } + + query := url.Values{} + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", string(options.RegistryAuthFrom)) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", options.Version.String()) + + // ensure that the image is tagged + var warnings []string + switch { + case options.Spec.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + options.Spec.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning := resolveContainerSpecImage(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth) + warnings = append(warnings, resolveWarning) + } + case options.Spec.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + options.Spec.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning := resolvePluginSpecRemote(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth) + warnings = append(warnings, resolveWarning) + } + } + + headers := http.Header{} + if options.EncodedRegistryAuth != "" { + headers.Set(registry.AuthHeader, options.EncodedRegistryAuth) + } + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, options.Spec, headers) + defer ensureReaderClosed(resp) + if err != nil { + return ServiceUpdateResult{}, err + } + + var response swarm.ServiceUpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + warnings = append(warnings, response.Warnings...) 
+ return ServiceUpdateResult{Warnings: warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go new file mode 100644 index 000000000..03ecce409 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmGetUnlockKeyResult contains the swarm unlock key. +type SwarmGetUnlockKeyResult struct { + Key string +} + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (SwarmGetUnlockKeyResult, error) { + resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmGetUnlockKeyResult{}, err + } + + var response swarm.UnlockKeyResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return SwarmGetUnlockKeyResult{Key: response.UnlockKey}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_init.go b/vendor/github.com/moby/moby/client/swarm_init.go new file mode 100644 index 000000000..caad56085 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_init.go @@ -0,0 +1,54 @@ +package client + +import ( + "context" + "encoding/json" + "net/netip" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmInitOptions contains options for initializing a new swarm. +type SwarmInitOptions struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec swarm.Spec + AutoLockManagers bool + Availability swarm.NodeAvailability + DefaultAddrPool []netip.Prefix + SubnetSize uint32 +} + +// SwarmInitResult contains the result of a SwarmInit operation. +type SwarmInitResult struct { + NodeID string +} + +// SwarmInit initializes the swarm. +func (cli *Client) SwarmInit(ctx context.Context, options SwarmInitOptions) (SwarmInitResult, error) { + req := swarm.InitRequest{ + ListenAddr: options.ListenAddr, + AdvertiseAddr: options.AdvertiseAddr, + DataPathAddr: options.DataPathAddr, + DataPathPort: options.DataPathPort, + ForceNewCluster: options.ForceNewCluster, + Spec: options.Spec, + AutoLockManagers: options.AutoLockManagers, + Availability: options.Availability, + DefaultAddrPool: options.DefaultAddrPool, + SubnetSize: options.SubnetSize, + } + + resp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmInitResult{}, err + } + + var nodeID string + err = json.NewDecoder(resp.Body).Decode(&nodeID) + return SwarmInitResult{NodeID: nodeID}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_inspect.go b/vendor/github.com/moby/moby/client/swarm_inspect.go new file mode 100644 index 000000000..40e1d018a --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_inspect.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmInspectOptions holds options for inspecting a swarm. +type SwarmInspectOptions struct { + // Add future optional parameters here +} + +// SwarmInspectResult represents the result of a SwarmInspect operation. +type SwarmInspectResult struct { + Swarm swarm.Swarm +} + +// SwarmInspect inspects the swarm. 
+func (cli *Client) SwarmInspect(ctx context.Context, options SwarmInspectOptions) (SwarmInspectResult, error) { + resp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmInspectResult{}, err + } + + var s swarm.Swarm + err = json.NewDecoder(resp.Body).Decode(&s) + return SwarmInspectResult{Swarm: s}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_join.go b/vendor/github.com/moby/moby/client/swarm_join.go new file mode 100644 index 000000000..66a754482 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_join.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmJoinOptions specifies options for joining a swarm. +type SwarmJoinOptions struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability swarm.NodeAvailability +} + +// SwarmJoinResult contains the result of joining a swarm. +type SwarmJoinResult struct { + // No fields currently; placeholder for future use +} + +// SwarmJoin joins the swarm. +func (cli *Client) SwarmJoin(ctx context.Context, options SwarmJoinOptions) (SwarmJoinResult, error) { + req := swarm.JoinRequest{ + ListenAddr: options.ListenAddr, + AdvertiseAddr: options.AdvertiseAddr, + DataPathAddr: options.DataPathAddr, + RemoteAddrs: options.RemoteAddrs, + JoinToken: options.JoinToken, + Availability: options.Availability, + } + + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + defer ensureReaderClosed(resp) + return SwarmJoinResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_leave.go b/vendor/github.com/moby/moby/client/swarm_leave.go new file mode 100644 index 000000000..a65a13de3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_leave.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + "net/url" +) + +// SwarmLeaveOptions contains options for leaving a swarm. +type SwarmLeaveOptions struct { + Force bool +} + +// SwarmLeaveResult represents the result of a SwarmLeave operation. +type SwarmLeaveResult struct{} + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, options SwarmLeaveOptions) (SwarmLeaveResult, error) { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + defer ensureReaderClosed(resp) + return SwarmLeaveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_unlock.go b/vendor/github.com/moby/moby/client/swarm_unlock.go new file mode 100644 index 000000000..92335afb5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_unlock.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmUnlockOptions specifies options for unlocking a swarm. +type SwarmUnlockOptions struct { + Key string +} + +// SwarmUnlockResult represents the result of unlocking a swarm. +type SwarmUnlockResult struct{} + +// SwarmUnlock unlocks locked swarm. 
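+//
+// A small sketch of the typical flow (illustrative only): it assumes a ready-to-use
+// *Client named cli, a context.Context named ctx, and that the unlock key was saved
+// earlier (for example from [Client.SwarmGetUnlockKey] while the manager was still
+// unlocked).
+//
+//	_, err := cli.SwarmUnlock(ctx, SwarmUnlockOptions{Key: unlockKey})
+//	if err != nil {
+//		return err
+//	}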
+func (cli *Client) SwarmUnlock(ctx context.Context, options SwarmUnlockOptions) (SwarmUnlockResult, error) { + req := &swarm.UnlockRequest{ + UnlockKey: options.Key, + } + resp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + defer ensureReaderClosed(resp) + return SwarmUnlockResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_update.go b/vendor/github.com/moby/moby/client/swarm_update.go new file mode 100644 index 000000000..81f62b2c0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_update.go @@ -0,0 +1,33 @@ +package client + +import ( + "context" + "net/url" + "strconv" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmUpdateOptions contains options for updating a swarm. +type SwarmUpdateOptions struct { + Version swarm.Version + Spec swarm.Spec + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} + +// SwarmUpdateResult represents the result of a SwarmUpdate operation. +type SwarmUpdateResult struct{} + +// SwarmUpdate updates the swarm. +func (cli *Client) SwarmUpdate(ctx context.Context, options SwarmUpdateOptions) (SwarmUpdateResult, error) { + query := url.Values{} + query.Set("version", options.Version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(options.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(options.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(options.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + return SwarmUpdateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/system_disk_usage.go b/vendor/github.com/moby/moby/client/system_disk_usage.go new file mode 100644 index 000000000..64a369df8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_disk_usage.go @@ -0,0 +1,332 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "slices" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/api/types/system" + "github.com/moby/moby/api/types/volume" + "github.com/moby/moby/client/pkg/versions" +) + +// DiskUsageOptions holds parameters for [Client.DiskUsage] operations. +type DiskUsageOptions struct { + // Containers controls whether container disk usage should be computed. + Containers bool + + // Images controls whether image disk usage should be computed. + Images bool + + // BuildCache controls whether build cache disk usage should be computed. + BuildCache bool + + // Volumes controls whether volume disk usage should be computed. + Volumes bool + + // Verbose enables more detailed disk usage information. + Verbose bool +} + +// DiskUsageResult is the result of [Client.DiskUsage] operations. +type DiskUsageResult struct { + // Containers holds container disk usage information. + Containers ContainersDiskUsage + + // Images holds image disk usage information. + Images ImagesDiskUsage + + // BuildCache holds build cache disk usage information. + BuildCache BuildCacheDiskUsage + + // Volumes holds volume disk usage information. + Volumes VolumesDiskUsage +} + +// ContainersDiskUsage contains disk usage information for containers. +type ContainersDiskUsage struct { + // ActiveCount is the number of active containers. + ActiveCount int64 + + // TotalCount is the total number of containers. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. 
+ Reclaimable int64 + + // TotalSize is the total disk space used by all containers. + TotalSize int64 + + // Items holds detailed information about each container. + Items []container.Summary +} + +// ImagesDiskUsage contains disk usage information for images. +type ImagesDiskUsage struct { + // ActiveCount is the number of active images. + ActiveCount int64 + + // TotalCount is the total number of images. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all images. + TotalSize int64 + + // Items holds detailed information about each image. + Items []image.Summary +} + +// VolumesDiskUsage contains disk usage information for volumes. +type VolumesDiskUsage struct { + // ActiveCount is the number of active volumes. + ActiveCount int64 + + // TotalCount is the total number of volumes. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all volumes. + TotalSize int64 + + // Items holds detailed information about each volume. + Items []volume.Volume +} + +// BuildCacheDiskUsage contains disk usage information for build cache. +type BuildCacheDiskUsage struct { + // ActiveCount is the number of active build cache records. + ActiveCount int64 + + // TotalCount is the total number of build cache records. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all build cache records. + TotalSize int64 + + // Items holds detailed information about each build cache record. + Items []build.CacheRecord +} + +// DiskUsage requests the current data usage from the daemon. +func (cli *Client) DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error) { + query := url.Values{} + + for _, t := range []struct { + flag bool + sysObj system.DiskUsageObject + }{ + {options.Containers, system.ContainerObject}, + {options.Images, system.ImageObject}, + {options.Volumes, system.VolumeObject}, + {options.BuildCache, system.BuildCacheObject}, + } { + if t.flag { + query.Add("type", string(t.sysObj)) + } + } + + if options.Verbose { + query.Set("verbose", "1") + } + + resp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return DiskUsageResult{}, err + } + + if versions.LessThan(cli.version, "1.52") { + // Generate result from a legacy response. 
+ var du legacyDiskUsage + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { + return DiskUsageResult{}, fmt.Errorf("retrieving disk usage: %v", err) + } + + return diskUsageResultFromLegacyAPI(&du), nil + } + + var du system.DiskUsage + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { + return DiskUsageResult{}, fmt.Errorf("retrieving disk usage: %v", err) + } + + var r DiskUsageResult + if idu := du.ImageUsage; idu != nil { + r.Images = ImagesDiskUsage{ + ActiveCount: idu.ActiveCount, + Reclaimable: idu.Reclaimable, + TotalCount: idu.TotalCount, + TotalSize: idu.TotalSize, + } + + if options.Verbose { + r.Images.Items = slices.Clone(idu.Items) + } + } + + if cdu := du.ContainerUsage; cdu != nil { + r.Containers = ContainersDiskUsage{ + ActiveCount: cdu.ActiveCount, + Reclaimable: cdu.Reclaimable, + TotalCount: cdu.TotalCount, + TotalSize: cdu.TotalSize, + } + + if options.Verbose { + r.Containers.Items = slices.Clone(cdu.Items) + } + } + + if bdu := du.BuildCacheUsage; bdu != nil { + r.BuildCache = BuildCacheDiskUsage{ + ActiveCount: bdu.ActiveCount, + Reclaimable: bdu.Reclaimable, + TotalCount: bdu.TotalCount, + TotalSize: bdu.TotalSize, + } + + if options.Verbose { + r.BuildCache.Items = slices.Clone(bdu.Items) + } + } + + if vdu := du.VolumeUsage; vdu != nil { + r.Volumes = VolumesDiskUsage{ + ActiveCount: vdu.ActiveCount, + Reclaimable: vdu.Reclaimable, + TotalCount: vdu.TotalCount, + TotalSize: vdu.TotalSize, + } + + if options.Verbose { + r.Volumes.Items = slices.Clone(vdu.Items) + } + } + + return r, nil +} + +// legacyDiskUsage is the response as was used by API < v1.52. +type legacyDiskUsage struct { + LayersSize int64 `json:"LayersSize,omitempty"` + Images []image.Summary `json:"Images,omitzero"` + Containers []container.Summary `json:"Containers,omitzero"` + Volumes []volume.Volume `json:"Volumes,omitzero"` + BuildCache []build.CacheRecord `json:"BuildCache,omitzero"` +} + +func diskUsageResultFromLegacyAPI(du *legacyDiskUsage) DiskUsageResult { + return DiskUsageResult{ + Images: imageDiskUsageFromLegacyAPI(du), + Containers: containerDiskUsageFromLegacyAPI(du), + BuildCache: buildCacheDiskUsageFromLegacyAPI(du), + Volumes: volumeDiskUsageFromLegacyAPI(du), + } +} + +func imageDiskUsageFromLegacyAPI(du *legacyDiskUsage) ImagesDiskUsage { + idu := ImagesDiskUsage{ + TotalSize: du.LayersSize, + TotalCount: int64(len(du.Images)), + Items: du.Images, + } + + var used int64 + for _, i := range idu.Items { + if i.Containers > 0 { + idu.ActiveCount++ + + if i.Size == -1 || i.SharedSize == -1 { + continue + } + used += (i.Size - i.SharedSize) + } + } + + if idu.TotalCount > 0 { + idu.Reclaimable = idu.TotalSize - used + } + + return idu +} + +func containerDiskUsageFromLegacyAPI(du *legacyDiskUsage) ContainersDiskUsage { + cdu := ContainersDiskUsage{ + TotalCount: int64(len(du.Containers)), + Items: du.Containers, + } + + var used int64 + for _, c := range cdu.Items { + cdu.TotalSize += c.SizeRw + switch c.State { + case container.StateRunning, container.StatePaused, container.StateRestarting: + cdu.ActiveCount++ + used += c.SizeRw + } + } + + cdu.Reclaimable = cdu.TotalSize - used + return cdu +} + +func buildCacheDiskUsageFromLegacyAPI(du *legacyDiskUsage) BuildCacheDiskUsage { + bdu := BuildCacheDiskUsage{ + TotalCount: int64(len(du.BuildCache)), + Items: du.BuildCache, + } + + var used int64 + for _, b := range du.BuildCache { + if !b.Shared { + bdu.TotalSize += b.Size + } + + if b.InUse { + bdu.ActiveCount++ + if !b.Shared { + used += b.Size + } + } + 
} + + bdu.Reclaimable = bdu.TotalSize - used + return bdu +} + +func volumeDiskUsageFromLegacyAPI(du *legacyDiskUsage) VolumesDiskUsage { + vdu := VolumesDiskUsage{ + TotalCount: int64(len(du.Volumes)), + Items: du.Volumes, + } + + var used int64 + for _, v := range vdu.Items { + // Ignore volumes with no usage data + if v.UsageData != nil { + if v.UsageData.RefCount > 0 { + vdu.ActiveCount++ + used += v.UsageData.Size + } + if v.UsageData.Size > 0 { + vdu.TotalSize += v.UsageData.Size + } + } + } + + vdu.Reclaimable = vdu.TotalSize - used + return vdu +} diff --git a/vendor/github.com/moby/moby/client/system_events.go b/vendor/github.com/moby/moby/client/system_events.go new file mode 100644 index 000000000..748e01208 --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_events.go @@ -0,0 +1,114 @@ +package client + +import ( + "context" + "net/http" + "net/url" + "time" + + "github.com/moby/moby/api/types" + "github.com/moby/moby/api/types/events" + "github.com/moby/moby/client/internal" + "github.com/moby/moby/client/internal/timestamp" +) + +// EventsListOptions holds parameters to filter events with. +type EventsListOptions struct { + Since string + Until string + Filters Filters +} + +// EventsResult holds the result of an Events query. +type EventsResult struct { + Messages <-chan events.Message + Err <-chan error +} + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an [io.EOF] error is +// sent over the error channel. If an error is sent, all processing is stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options EventsListOptions) EventsResult { + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(options) + if err != nil { + close(started) + errs <- err + return + } + + headers := http.Header{} + headers.Add("Accept", types.MediaTypeJSONSequence) + headers.Add("Accept", types.MediaTypeNDJSON) + resp, err := cli.get(ctx, "/events", query, headers) + if err != nil { + close(started) + errs <- err + return + } + defer resp.Body.Close() + + contentType := resp.Header.Get("Content-Type") + decoder := internal.NewJSONStreamDecoder(resp.Body, contentType) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return EventsResult{ + Messages: messages, + Err: errs, + } +} + +func buildEventsQueryParams(options EventsListOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timestamp.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + options.Filters.updateURLValues(query) + + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/system_info.go b/vendor/github.com/moby/moby/client/system_info.go new file mode 100644 index 000000000..4c0a2238e --- /dev/null +++ 
b/vendor/github.com/moby/moby/client/system_info.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/system" +) + +type InfoOptions struct { + // No options currently; placeholder for future use +} + +type SystemInfoResult struct { + Info system.Info +} + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context, options InfoOptions) (SystemInfoResult, error) { + resp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SystemInfoResult{}, err + } + + var info system.Info + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return SystemInfoResult{}, fmt.Errorf("Error reading remote info: %v", err) + } + + return SystemInfoResult{Info: info}, nil +} diff --git a/vendor/github.com/moby/moby/client/task_inspect.go b/vendor/github.com/moby/moby/client/task_inspect.go new file mode 100644 index 000000000..96edcb09f --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_inspect.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// TaskInspectOptions contains options for inspecting a task. +type TaskInspectOptions struct { + // Currently no options are defined. +} + +// TaskInspectResult contains the result of a task inspection. +type TaskInspectResult struct { + Task swarm.Task + Raw json.RawMessage +} + +// TaskInspect returns the task information and its raw representation. +func (cli *Client) TaskInspect(ctx context.Context, taskID string, options TaskInspectOptions) (TaskInspectResult, error) { + taskID, err := trimID("task", taskID) + if err != nil { + return TaskInspectResult{}, err + } + + resp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + return TaskInspectResult{}, err + } + + var out TaskInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Task) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/task_list.go b/vendor/github.com/moby/moby/client/task_list.go new file mode 100644 index 000000000..5f7c41bb9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters Filters +} + +// TaskListResult contains the result of a task list operation. +type TaskListResult struct { + Items []swarm.Task +} + +// TaskList returns the list of tasks. +func (cli *Client) TaskList(ctx context.Context, options TaskListOptions) (TaskListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return TaskListResult{}, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.Body).Decode(&tasks) + return TaskListResult{Items: tasks}, err +} diff --git a/vendor/github.com/moby/moby/client/task_logs.go b/vendor/github.com/moby/moby/client/task_logs.go new file mode 100644 index 000000000..e4de019f3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_logs.go @@ -0,0 +1,84 @@ +package client + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/moby/moby/client/internal/timestamp" +) + +// TaskLogsOptions holds parameters to filter logs with. 
+type TaskLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// TaskLogsResult holds the result of a task logs operation. +// It implements [io.ReadCloser]. +type TaskLogsResult interface { + io.ReadCloser +} + +// TaskLogs returns the logs generated by a service in a [TaskLogsResult]. +// as an [io.ReadCloser]. Callers should close the stream. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) TaskLogs(ctx context.Context, taskID string, options TaskLogsOptions) (TaskLogsResult, error) { + // TODO(thaJeztah): this function needs documentation about the format of ths stream (similar to for container logs) + // TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348 + + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return &taskLogsResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type taskLogsResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*taskLogsResult)(nil) + _ ContainerLogsResult = (*taskLogsResult)(nil) +) diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/moby/moby/client/utils.go similarity index 61% rename from vendor/github.com/docker/docker/client/utils.go rename to vendor/github.com/moby/moby/client/utils.go index 27f2b9884..6a5fc46ea 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/moby/moby/client/utils.go @@ -1,19 +1,20 @@ -package client // import "github.com/docker/docker/client" +package client import ( + "bytes" + "context" "encoding/json" + "errors" "fmt" - "net/url" + "io" + "net/http" "strings" + "sync" cerrdefs "github.com/containerd/errdefs" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/internal/lazyregexp" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -var headerRegexp = lazyregexp.New(`\ADocker/.+\s\((.+)\)\z`) - type emptyIDError string func (e emptyIDError) InvalidParameter() {} @@ -25,36 +26,12 @@ func (e emptyIDError) Error() string { // trimID trims the given object-ID / name, returning an error if it's empty. func trimID(objType, id string) (string, error) { id = strings.TrimSpace(id) - if len(id) == 0 { + if id == "" { return "", emptyIDError(objType) } return id, nil } -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// getFiltersQuery returns a url query with "filters" query term, based on the -// filters provided. 
-func getFiltersQuery(f filters.Args) (url.Values, error) { - query := url.Values{} - if f.Len() > 0 { - filterJSON, err := filters.ToJSON(f) - if err != nil { - return query, err - } - query.Set("filters", filterJSON) - } - return query, nil -} - // encodePlatforms marshals the given platform(s) to JSON format, to // be used for query-parameters for filtering / selecting platforms. func encodePlatforms(platform ...ocispec.Platform) ([]string, error) { @@ -94,3 +71,37 @@ func encodePlatform(platform *ocispec.Platform) (string, error) { } return string(p), nil } + +func decodeWithRaw[T any](resp *http.Response, out *T) (raw json.RawMessage, _ error) { + if resp == nil || resp.Body == nil { + return nil, errors.New("empty response") + } + defer ensureReaderClosed(resp) + + var buf bytes.Buffer + tr := io.TeeReader(resp.Body, &buf) + err := json.NewDecoder(tr).Decode(out) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// newCancelReadCloser wraps rc so it's automatically closed when ctx is canceled. +// Close is idempotent and returns the first error from rc.Close. +func newCancelReadCloser(ctx context.Context, rc io.ReadCloser) io.ReadCloser { + crc := &cancelReadCloser{ + rc: rc, + close: sync.OnceValue(rc.Close), + } + context.AfterFunc(ctx, func() { _ = crc.Close() }) + return crc +} + +type cancelReadCloser struct { + rc io.ReadCloser + close func() error +} + +func (c *cancelReadCloser) Read(p []byte) (int, error) { return c.rc.Read(p) } +func (c *cancelReadCloser) Close() error { return c.close() } diff --git a/vendor/github.com/moby/moby/client/version.go b/vendor/github.com/moby/moby/client/version.go new file mode 100644 index 000000000..7fa5a3fa0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/version.go @@ -0,0 +1,81 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/system" +) + +// ServerVersionOptions specifies options for the server version request. +type ServerVersionOptions struct { + // Currently no options are supported. +} + +// ServerVersionResult contains information about the Docker server host. +type ServerVersionResult struct { + // Platform is the platform (product name) the server is running on. + Platform PlatformInfo + + // Version is the version of the daemon. + Version string + + // APIVersion is the highest API version supported by the server. + APIVersion string + + // MinAPIVersion is the minimum API version the server supports. + MinAPIVersion string + + // Os is the operating system the server runs on. + Os string + + // Arch is the hardware architecture the server runs on. + Arch string + + // Experimental indicates that the daemon runs with experimental + // features enabled. + // + // Deprecated: this field will be removed in the next version. + Experimental bool + + // Components contains version information for the components making + // up the server. Information in this field is for informational + // purposes, and not part of the API contract. + Components []system.ComponentVersion +} + +// PlatformInfo holds information about the platform (product name) the +// server is running on. +type PlatformInfo struct { + // Name is the name of the platform (for example, "Docker Engine - Community", + // or "Docker Desktop 4.49.0 (208003)") + Name string +} + +// ServerVersion returns information of the Docker server host. 
+func (cli *Client) ServerVersion(ctx context.Context, _ ServerVersionOptions) (ServerVersionResult, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ServerVersionResult{}, err + } + + var v system.VersionResponse + err = json.NewDecoder(resp.Body).Decode(&v) + if err != nil { + return ServerVersionResult{}, err + } + + return ServerVersionResult{ + Platform: PlatformInfo{ + Name: v.Platform.Name, + }, + Version: v.Version, + APIVersion: v.APIVersion, + MinAPIVersion: v.MinAPIVersion, + Os: v.Os, + Arch: v.Arch, + Experimental: v.Experimental, //nolint:staticcheck // ignore deprecated field. + Components: v.Components, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_create.go b/vendor/github.com/moby/moby/client/volume_create.go new file mode 100644 index 000000000..674e06335 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_create.go @@ -0,0 +1,42 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeCreateOptions specifies the options to create a volume. +type VolumeCreateOptions struct { + Name string + Driver string + DriverOpts map[string]string + Labels map[string]string + ClusterVolumeSpec *volume.ClusterVolumeSpec +} + +// VolumeCreateResult is the result of a volume creation. +type VolumeCreateResult struct { + Volume volume.Volume +} + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options VolumeCreateOptions) (VolumeCreateResult, error) { + createRequest := volume.CreateRequest{ + Name: options.Name, + Driver: options.Driver, + DriverOpts: options.DriverOpts, + Labels: options.Labels, + ClusterVolumeSpec: options.ClusterVolumeSpec, + } + resp, err := cli.post(ctx, "/volumes/create", nil, createRequest, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeCreateResult{}, err + } + + var v volume.Volume + err = json.NewDecoder(resp.Body).Decode(&v) + return VolumeCreateResult{Volume: v}, err +} diff --git a/vendor/github.com/moby/moby/client/volume_inspect.go b/vendor/github.com/moby/moby/client/volume_inspect.go new file mode 100644 index 000000000..cf00236a2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_inspect.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeInspectOptions holds options for inspecting a volume. +type VolumeInspectOptions struct { + // Add future optional parameters here +} + +// VolumeInspectResult holds the result from the [Client.VolumeInspect] method. +type VolumeInspectResult struct { + Volume volume.Volume + Raw json.RawMessage +} + +// VolumeInspect returns the information about a specific volume in the docker host. 
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string, options VolumeInspectOptions) (VolumeInspectResult, error) { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return VolumeInspectResult{}, err + } + + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + return VolumeInspectResult{}, err + } + + var out VolumeInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Volume) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/volume_list.go b/vendor/github.com/moby/moby/client/volume_list.go new file mode 100644 index 000000000..989a0292e --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_list.go @@ -0,0 +1,46 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeListOptions holds parameters to list volumes. +type VolumeListOptions struct { + Filters Filters +} + +// VolumeListResult holds the result from the [Client.VolumeList] method. +type VolumeListResult struct { + // List of volumes. + Items []volume.Volume + + // Warnings that occurred when fetching the list of volumes. + Warnings []string +} + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, options VolumeListOptions) (VolumeListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeListResult{}, err + } + + var apiResp volume.ListResponse + err = json.NewDecoder(resp.Body).Decode(&apiResp) + if err != nil { + return VolumeListResult{}, err + } + + return VolumeListResult{ + Items: apiResp.Volumes, + Warnings: apiResp.Warnings, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_prune.go b/vendor/github.com/moby/moby/client/volume_prune.go new file mode 100644 index 000000000..561e328d7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_prune.go @@ -0,0 +1,55 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/volume" +) + +// VolumePruneOptions holds parameters to prune volumes. +type VolumePruneOptions struct { + // All controls whether named volumes should also be pruned. By + // default, only anonymous volumes are pruned. + All bool + + // Filters to apply when pruning. + Filters Filters +} + +// VolumePruneResult holds the result from the [Client.VolumePrune] method. 
+type VolumePruneResult struct { + Report volume.PruneReport +} + +// VolumePrune requests the daemon to delete unused data +func (cli *Client) VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) { + if options.All { + if _, ok := options.Filters["all"]; ok { + return VolumePruneResult{}, errdefs.ErrInvalidArgument.WithMessage(`conflicting options: cannot specify both "all" and "all" filter`) + } + if options.Filters == nil { + options.Filters = Filters{} + } + options.Filters.Add("all", "true") + } + + query := url.Values{} + options.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumePruneResult{}, err + } + + var report volume.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return VolumePruneResult{}, fmt.Errorf("error retrieving volume prune report: %v", err) + } + + return VolumePruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_remove.go b/vendor/github.com/moby/moby/client/volume_remove.go new file mode 100644 index 000000000..0449e08d4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_remove.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "net/url" +) + +// VolumeRemoveOptions holds options for [Client.VolumeRemove]. +type VolumeRemoveOptions struct { + // Force the removal of the volume + Force bool +} + +// VolumeRemoveResult holds the result of [Client.VolumeRemove], +type VolumeRemoveResult struct { + // Add future fields here. +} + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, options VolumeRemoveOptions) (VolumeRemoveResult, error) { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return VolumeRemoveResult{}, err + } + + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeRemoveResult{}, err + } + return VolumeRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_update.go b/vendor/github.com/moby/moby/client/volume_update.go new file mode 100644 index 000000000..5aa2a0aa1 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_update.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" + "github.com/moby/moby/api/types/volume" +) + +// VolumeUpdateOptions holds options for [Client.VolumeUpdate]. +type VolumeUpdateOptions struct { + Version swarm.Version + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *volume.ClusterVolumeSpec `json:"Spec,omitempty"` +} + +// VolumeUpdateResult holds the result of [Client.VolumeUpdate], +type VolumeUpdateResult struct { + // Add future fields here. +} + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. 
+func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, options VolumeUpdateOptions) (VolumeUpdateResult, error) { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return VolumeUpdateResult{}, err + } + + query := url.Values{} + query.Set("version", options.Version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeUpdateResult{}, err + } + return VolumeUpdateResult{}, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 86e1f5f33..bac662f3e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -15,8 +15,6 @@ github.com/containerd/errdefs ## explicit; go 1.22 github.com/containerd/errdefs/pkg/errhttp github.com/containerd/errdefs/pkg/internal/cause -# github.com/containerd/log v0.1.0 -## explicit; go 1.20 # github.com/containerd/stargz-snapshotter/estargz v0.16.3 ## explicit; go 1.22.0 github.com/containerd/stargz-snapshotter/estargz @@ -36,39 +34,12 @@ github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.3+incompatible ## explicit github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v28.2.2+incompatible -## explicit -github.com/docker/docker/api -github.com/docker/docker/api/types -github.com/docker/docker/api/types/blkiodev -github.com/docker/docker/api/types/build -github.com/docker/docker/api/types/checkpoint -github.com/docker/docker/api/types/common -github.com/docker/docker/api/types/container -github.com/docker/docker/api/types/events -github.com/docker/docker/api/types/filters -github.com/docker/docker/api/types/image -github.com/docker/docker/api/types/mount -github.com/docker/docker/api/types/network -github.com/docker/docker/api/types/registry -github.com/docker/docker/api/types/storage -github.com/docker/docker/api/types/strslice -github.com/docker/docker/api/types/swarm -github.com/docker/docker/api/types/swarm/runtime -github.com/docker/docker/api/types/system -github.com/docker/docker/api/types/time -github.com/docker/docker/api/types/versions -github.com/docker/docker/api/types/volume -github.com/docker/docker/client -github.com/docker/docker/internal/lazyregexp -github.com/docker/docker/internal/multierror # github.com/docker/docker-credential-helpers v0.9.3 ## explicit; go 1.21 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials -# github.com/docker/go-connections v0.5.0 +# github.com/docker/go-connections v0.6.0 ## explicit; go 1.18 -github.com/docker/go-connections/nat github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.5.0 @@ -84,9 +55,6 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/gogo/protobuf v1.3.2 -## explicit; go 1.15 -github.com/gogo/protobuf/proto # github.com/google/go-cmp v0.7.0 ## explicit; go 1.21 github.com/google/go-cmp/cmp @@ -114,12 +82,31 @@ github.com/mitchellh/go-homedir # github.com/moby/docker-image-spec v1.3.1 ## explicit; go 1.18 github.com/moby/docker-image-spec/specs-go/v1 -# github.com/moby/sys/atomicwriter v0.1.0 -## explicit; go 1.18 -# github.com/moby/term v0.0.0-20221205130635-1aeaba878587 -## explicit; go 1.18 -# github.com/morikuni/aec v1.0.0 -## explicit +# github.com/moby/moby/api v1.52.0 +## explicit; go 1.24.0 +github.com/moby/moby/api/types +github.com/moby/moby/api/types/blkiodev +github.com/moby/moby/api/types/build 
+github.com/moby/moby/api/types/checkpoint +github.com/moby/moby/api/types/common +github.com/moby/moby/api/types/container +github.com/moby/moby/api/types/events +github.com/moby/moby/api/types/image +github.com/moby/moby/api/types/jsonstream +github.com/moby/moby/api/types/mount +github.com/moby/moby/api/types/network +github.com/moby/moby/api/types/plugin +github.com/moby/moby/api/types/registry +github.com/moby/moby/api/types/storage +github.com/moby/moby/api/types/swarm +github.com/moby/moby/api/types/system +github.com/moby/moby/api/types/volume +# github.com/moby/moby/client v0.1.0 +## explicit; go 1.24.0 +github.com/moby/moby/client +github.com/moby/moby/client/internal +github.com/moby/moby/client/internal/timestamp +github.com/moby/moby/client/pkg/versions # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -168,8 +155,6 @@ go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 -## explicit; go 1.22.7 # go.opentelemetry.io/otel/metric v1.36.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/metric @@ -204,8 +189,6 @@ golang.org/x/sync/errgroup golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac -## explicit # golang.org/x/tools v0.34.0 ## explicit; go 1.23.0 golang.org/x/tools/go/gcexportdata @@ -225,10 +208,6 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/protobuf v1.36.3 -## explicit; go 1.21 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# gotest.tools/v3 v3.0.3 -## explicit; go 1.11
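
Reviewer note, appended for illustration only and not part of the vendored patch: the swarm_*.go files vendored above move every swarm call to an Options/Result-struct style. The sketch below shows how that surface could be exercised, using only signatures that appear in this patch; it assumes an already-constructed *client.Client (client construction is outside this hunk), and the package and function names (example, initSingleNodeSwarm, leaveSwarm) are placeholders.

package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

// initSingleNodeSwarm initializes a one-node swarm with auto-locked managers,
// prints the unlock key, and shows how a node would later leave the swarm.
func initSingleNodeSwarm(ctx context.Context, cli *client.Client) error {
	initRes, err := cli.SwarmInit(ctx, client.SwarmInitOptions{
		ListenAddr:       "0.0.0.0:2377",
		AutoLockManagers: true,
	})
	if err != nil {
		return fmt.Errorf("swarm init: %w", err)
	}
	fmt.Println("manager node ID:", initRes.NodeID)

	// With AutoLockManagers set, retrieve and store the unlock key right away;
	// it is required to unlock a restarted manager.
	keyRes, err := cli.SwarmGetUnlockKey(ctx)
	if err != nil {
		return fmt.Errorf("get unlock key: %w", err)
	}
	fmt.Println("unlock key:", keyRes.Key)
	return nil
}

// leaveSwarm forces the node out of the swarm, mirroring `docker swarm leave --force`.
func leaveSwarm(ctx context.Context, cli *client.Client) error {
	_, err := cli.SwarmLeave(ctx, client.SwarmLeaveOptions{Force: true})
	return err
}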
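
A similar hedged sketch for the system-level calls vendored above (ServerVersion, DiskUsage, Events). The *client.Client is again assumed to exist; only the Options/Result shapes shown in this patch are used, and the event loop relies on the documented behavior that errors, including io.EOF at end of stream, arrive on the Err channel.

package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

// printServerSummary queries the daemon version and a verbose disk-usage
// breakdown using the new result-struct API.
func printServerSummary(ctx context.Context, cli *client.Client) error {
	ver, err := cli.ServerVersion(ctx, client.ServerVersionOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%s %s (API %s)\n", ver.Platform.Name, ver.Version, ver.APIVersion)

	du, err := cli.DiskUsage(ctx, client.DiskUsageOptions{
		Containers: true,
		Images:     true,
		Volumes:    true,
		BuildCache: true,
		Verbose:    true, // populate the Items slices in each sub-result
	})
	if err != nil {
		return err
	}
	fmt.Printf("images: %d total, %d bytes reclaimable\n",
		du.Images.TotalCount, du.Images.Reclaimable)
	fmt.Printf("containers: %d total, %d active\n",
		du.Containers.TotalCount, du.Containers.ActiveCount)
	return nil
}

// watchEvents consumes the daemon event stream until ctx is canceled.
// Events returns its two channels immediately; cancellation surfaces as an
// error on the Err channel.
func watchEvents(ctx context.Context, cli *client.Client) error {
	res := cli.Events(ctx, client.EventsListOptions{})
	for {
		select {
		case msg := <-res.Messages:
			fmt.Printf("event: %+v\n", msg)
		case err := <-res.Err:
			return err
		}
	}
}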
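
And one for the volume lifecycle (VolumeCreate, VolumeList, VolumeRemove) from the volume_*.go files above; the volume name "scratch" and its label are arbitrary example values.

package example

import (
	"context"
	"fmt"

	"github.com/moby/moby/client"
)

// recreateScratchVolume creates a local volume, lists volumes to confirm it is
// present, and then force-removes it again.
func recreateScratchVolume(ctx context.Context, cli *client.Client) error {
	created, err := cli.VolumeCreate(ctx, client.VolumeCreateOptions{
		Name:   "scratch",
		Driver: "local",
		Labels: map[string]string{"purpose": "demo"},
	})
	if err != nil {
		return fmt.Errorf("create volume: %w", err)
	}
	fmt.Println("created volume:", created.Volume.Name)

	list, err := cli.VolumeList(ctx, client.VolumeListOptions{})
	if err != nil {
		return fmt.Errorf("list volumes: %w", err)
	}
	for _, w := range list.Warnings {
		fmt.Println("warning:", w)
	}
	fmt.Printf("%d volume(s) on the daemon\n", len(list.Items))

	_, err = cli.VolumeRemove(ctx, created.Volume.Name, client.VolumeRemoveOptions{Force: true})
	return err
}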